author:    Eric Joyner <erj@FreeBSD.org>  2020-06-09 22:42:54 +0000
committer: Eric Joyner <erj@FreeBSD.org>  2020-06-09 22:42:54 +0000
commit:    b4a7ce0690aedd9763b3b47ee7fcdb421f0434c7 (patch)
tree:      6a930d04bebc0454b7d4274fcd3455748cd0a1d7 /sys/dev/ixl
parent:    a3d565a1188f2e57bf70e2949d353d27ef1f1606 (diff)
ixl(4): Add FW recovery mode support and other things
Update the iflib version of the ixl driver based on the OOT version ixl-1.11.29.

Major changes:
- Extract iflib-specific functions from ixl_pf_main.c to ixl_pf_iflib.c to
  simplify code sharing between the legacy and iflib versions of the driver
- Add support for the most recent FW API version (1.10), which extends FW LLDP
  Agent control by the user to X722 devices
- Improve handling of device global reset
- Add support for the FW recovery mode
- Use a virtchnl function to validate virtual channel messages instead of
  separate checks
- Fix MAC/VLAN filter accounting

Submitted by:	Krzysztof Galazka <krzysztof.galazka@intel.com>
Reviewed by:	erj@
Tested by:	Jeffrey Pieper <jeffrey.e.pieper@intel.com>
MFC after:	1 week
Relnotes:	yes
Sponsored by:	Intel Corporation
Differential Revision:	https://reviews.freebsd.org/D24564
Notes: svn path=/head/; revision=361992
Diffstat (limited to 'sys/dev/ixl')
-rw-r--r--  sys/dev/ixl/i40e_adminq.c      |  138
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h  |  231
-rw-r--r--  sys/dev/ixl/i40e_common.c      |  590
-rw-r--r--  sys/dev/ixl/i40e_dcb.c         |   91
-rw-r--r--  sys/dev/ixl/i40e_dcb.h         |   18
-rw-r--r--  sys/dev/ixl/i40e_devids.h      |    2
-rw-r--r--  sys/dev/ixl/i40e_lan_hmc.c     |   15
-rw-r--r--  sys/dev/ixl/i40e_nvm.c         |  115
-rw-r--r--  sys/dev/ixl/i40e_osdep.c       |   28
-rw-r--r--  sys/dev/ixl/i40e_prototype.h   |   83
-rw-r--r--  sys/dev/ixl/i40e_register.h    |   38
-rw-r--r--  sys/dev/ixl/i40e_type.h        |   38
-rw-r--r--  sys/dev/ixl/if_iavf.c          |    2
-rw-r--r--  sys/dev/ixl/if_ixl.c           |  255
-rw-r--r--  sys/dev/ixl/ixl.h              |   30
-rw-r--r--  sys/dev/ixl/ixl_pf.h           |   93
-rw-r--r--  sys/dev/ixl/ixl_pf_i2c.c       |   14
-rw-r--r--  sys/dev/ixl/ixl_pf_iflib.c     | 1137
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.c       |  253
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c      | 2140
-rw-r--r--  sys/dev/ixl/ixl_txrx.c         |    9
21 files changed, 3128 insertions, 2192 deletions
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index 438a89504f74..d25798d23bd1 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -125,6 +125,7 @@ enum i40e_status_code i40e_alloc_adminq_arq_ring(struct i40e_hw *hw)
**/
void i40e_free_adminq_asq(struct i40e_hw *hw)
{
+ i40e_free_virt_mem(hw, &hw->aq.asq.cmd_buf);
i40e_free_dma_mem(hw, &hw->aq.asq.desc_buf);
}
@@ -404,7 +405,7 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
/* initialize base registers */
ret_code = i40e_config_asq_regs(hw);
if (ret_code != I40E_SUCCESS)
- goto init_adminq_free_rings;
+ goto init_config_regs;
/* success! */
hw->aq.asq.count = hw->aq.num_asq_entries;
@@ -412,6 +413,10 @@ enum i40e_status_code i40e_init_asq(struct i40e_hw *hw)
init_adminq_free_rings:
i40e_free_adminq_asq(hw);
+ return ret_code;
+
+init_config_regs:
+ i40e_free_asq_bufs(hw);
init_adminq_exit:
return ret_code;
@@ -563,6 +568,70 @@ static void i40e_resume_aq(struct i40e_hw *hw)
}
/**
+ * i40e_set_hw_flags - set HW flags
+ * @hw: pointer to the hardware structure
+ **/
+static void i40e_set_hw_flags(struct i40e_hw *hw)
+{
+ struct i40e_adminq_info *aq = &hw->aq;
+
+ hw->flags = 0;
+
+ switch (hw->mac.type) {
+ case I40E_MAC_XL710:
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710)) {
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+ /* The ability to RX (not drop) 802.1ad frames */
+ hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
+ }
+ break;
+ case I40E_MAC_X722:
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
+ I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722))
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_STOPPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_X722))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= I40E_MINOR_VER_FW_REQUEST_FEC_X722))
+ hw->flags |= I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE;
+
+ /* fall through */
+ default:
+ break;
+ }
+
+ /* Newer versions of firmware require lock when reading the NVM */
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 5))
+ hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 8)) {
+ hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
+ hw->flags |= I40E_HW_FLAG_DROP_MODE;
+ }
+
+ if (aq->api_maj_ver > 1 ||
+ (aq->api_maj_ver == 1 &&
+ aq->api_min_ver >= 9))
+ hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED;
+}
+
+/**
* i40e_init_adminq - main initialization routine for Admin Queue
* @hw: pointer to the hardware structure
*
@@ -575,21 +644,22 @@ static void i40e_resume_aq(struct i40e_hw *hw)
**/
enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
{
+ struct i40e_adminq_info *aq = &hw->aq;
+ enum i40e_status_code ret_code;
u16 cfg_ptr, oem_hi, oem_lo;
u16 eetrack_lo, eetrack_hi;
- enum i40e_status_code ret_code;
int retry = 0;
/* verify input for valid configuration */
- if ((hw->aq.num_arq_entries == 0) ||
- (hw->aq.num_asq_entries == 0) ||
- (hw->aq.arq_buf_size == 0) ||
- (hw->aq.asq_buf_size == 0)) {
+ if (aq->num_arq_entries == 0 ||
+ aq->num_asq_entries == 0 ||
+ aq->arq_buf_size == 0 ||
+ aq->asq_buf_size == 0) {
ret_code = I40E_ERR_CONFIG;
goto init_adminq_exit;
}
- i40e_init_spinlock(&hw->aq.asq_spinlock);
- i40e_init_spinlock(&hw->aq.arq_spinlock);
+ i40e_init_spinlock(&aq->asq_spinlock);
+ i40e_init_spinlock(&aq->arq_spinlock);
/* Set up register offsets */
i40e_adminq_init_regs(hw);
@@ -616,11 +686,11 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
*/
do {
ret_code = i40e_aq_get_firmware_version(hw,
- &hw->aq.fw_maj_ver,
- &hw->aq.fw_min_ver,
- &hw->aq.fw_build,
- &hw->aq.api_maj_ver,
- &hw->aq.api_min_ver,
+ &aq->fw_maj_ver,
+ &aq->fw_min_ver,
+ &aq->fw_build,
+ &aq->api_maj_ver,
+ &aq->api_min_ver,
NULL);
if (ret_code != I40E_ERR_ADMIN_QUEUE_TIMEOUT)
break;
@@ -631,6 +701,12 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
if (ret_code != I40E_SUCCESS)
goto init_adminq_free_arq;
+ /*
+	 * Some features were introduced in different FW API versions
+	 * for different MAC types.
+ */
+ i40e_set_hw_flags(hw);
+
/* get the NVM version info */
i40e_read_nvm_word(hw, I40E_SR_NVM_DEV_STARTER_VERSION,
&hw->nvm.version);
@@ -644,25 +720,7 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
&oem_lo);
hw->nvm.oem_ver = ((u32)oem_hi << 16) | oem_lo;
- /* The ability to RX (not drop) 802.1ad frames was added in API 1.7 */
- if ((hw->aq.api_maj_ver > 1) ||
- ((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver >= 7)))
- hw->flags |= I40E_HW_FLAG_802_1AD_CAPABLE;
-
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= I40E_MINOR_VER_GET_LINK_INFO_XL710) {
- hw->flags |= I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE;
- }
-
- /* Newer versions of firmware require lock when reading the NVM */
- if ((hw->aq.api_maj_ver > 1) ||
- ((hw->aq.api_maj_ver == 1) &&
- (hw->aq.api_min_ver >= 5)))
- hw->flags |= I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
-
- if (hw->aq.api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
+ if (aq->api_maj_ver > I40E_FW_API_VERSION_MAJOR) {
ret_code = I40E_ERR_FIRMWARE_API_VERSION;
goto init_adminq_free_arq;
}
@@ -682,8 +740,8 @@ init_adminq_free_arq:
init_adminq_free_asq:
i40e_shutdown_asq(hw);
init_adminq_destroy_spinlocks:
- i40e_destroy_spinlock(&hw->aq.asq_spinlock);
- i40e_destroy_spinlock(&hw->aq.arq_spinlock);
+ i40e_destroy_spinlock(&aq->asq_spinlock);
+ i40e_destroy_spinlock(&aq->arq_spinlock);
init_adminq_exit:
return ret_code;
@@ -728,7 +786,7 @@ u16 i40e_clean_asq(struct i40e_hw *hw)
desc = I40E_ADMINQ_DESC(*asq, ntc);
details = I40E_ADMINQ_DETAILS(*asq, ntc);
while (rd32(hw, hw->aq.asq.head) != ntc) {
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"ntc %d head %d.\n", ntc, rd32(hw, hw->aq.asq.head));
if (details->callback) {
@@ -808,7 +866,7 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
if (val >= hw->aq.num_asq_entries) {
i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
"AQTX: head overrun at %d\n", val);
- status = I40E_ERR_QUEUE_EMPTY;
+ status = I40E_ERR_ADMIN_QUEUE_FULL;
goto asq_send_command_error;
}
@@ -896,7 +954,7 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
}
/* bump the tail */
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQTX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQTX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc_on_ring,
buff, buff_size);
(hw->aq.asq.next_to_use)++;
@@ -942,12 +1000,14 @@ enum i40e_status_code i40e_asq_send_command(struct i40e_hw *hw,
cmd_completed = TRUE;
if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_OK)
status = I40E_SUCCESS;
+ else if ((enum i40e_admin_queue_err)retval == I40E_AQ_RC_EBUSY)
+ status = I40E_ERR_NOT_READY;
else
status = I40E_ERR_ADMIN_QUEUE_ERROR;
hw->aq.asq_last_status = (enum i40e_admin_queue_err)retval;
}
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE,
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND,
"AQTX: desc and buffer writeback:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, buff, buff_size);
@@ -1063,7 +1123,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.r.arq_bi[desc_idx].va,
e->msg_len, I40E_DMA_TO_NONDMA);
- i40e_debug(hw, I40E_DEBUG_AQ_MESSAGE, "AQRX: desc and buffer:\n");
+ i40e_debug(hw, I40E_DEBUG_AQ_COMMAND, "AQRX: desc and buffer:\n");
i40e_debug_aq(hw, I40E_DEBUG_AQ_COMMAND, (void *)desc, e->msg_buf,
hw->aq.arq_buf_size);
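[Editor's note] The new i40e_set_hw_flags() above repeats one idiom for every
capability: enable a flag when the AQ API version is at least 1.N. A minimal
sketch of that comparison, assuming only the api_maj_ver/api_min_ver fields
visible in this diff; the i40e_aq_ver_ge() helper name is hypothetical and not
part of the driver:

/* Hypothetical helper: true when the AQ API version is >= maj.min.
 * Mirrors the repeated checks in i40e_set_hw_flags(). */
static inline bool
i40e_aq_ver_ge(struct i40e_adminq_info *aq, u16 maj, u16 min)
{
	return (aq->api_maj_ver > maj ||
	    (aq->api_maj_ver == maj && aq->api_min_ver >= min));
}

/* e.g. the API 1.8 gate for persistent LLDP and drop mode: */
if (i40e_aq_ver_ge(&hw->aq, 1, 8)) {
	hw->flags |= I40E_HW_FLAG_FW_LLDP_PERSISTENT;
	hw->flags |= I40E_HW_FLAG_DROP_MODE;
}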
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 8944493bba4c..9ac7b84ceec4 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -43,8 +43,8 @@
#define I40E_FW_API_VERSION_MAJOR 0x0001
-#define I40E_FW_API_VERSION_MINOR_X722 0x0005
-#define I40E_FW_API_VERSION_MINOR_X710 0x0007
+#define I40E_FW_API_VERSION_MINOR_X722 0x000A
+#define I40E_FW_API_VERSION_MINOR_X710 0x000A
#define I40E_FW_MINOR_VERSION(_h) ((_h)->mac.type == I40E_MAC_XL710 ? \
I40E_FW_API_VERSION_MINOR_X710 : \
@@ -52,6 +52,12 @@
/* API version 1.7 implements additional link and PHY-specific APIs */
#define I40E_MINOR_VER_GET_LINK_INFO_XL710 0x0007
+/* API version 1.9 for X722 implements additional link and PHY-specific APIs */
+#define I40E_MINOR_VER_GET_LINK_INFO_X722 0x0009
+/* API version 1.6 for X722 devices adds ability to stop FW LLDP agent */
+#define I40E_MINOR_VER_FW_LLDP_STOPPABLE_X722 0x0006
+/* API version 1.10 for X722 devices adds ability to request FEC encoding */
+#define I40E_MINOR_VER_FW_REQUEST_FEC_X722 0x000A
struct i40e_aq_desc {
__le16 flags;
@@ -204,6 +210,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
+ i40e_aqc_opc_replace_cloud_filters = 0x025F,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -289,6 +296,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_get_cee_dcb_cfg = 0x0A07,
i40e_aqc_opc_lldp_set_local_mib = 0x0A08,
i40e_aqc_opc_lldp_stop_start_spec_agent = 0x0A09,
+ i40e_aqc_opc_lldp_restore = 0x0A0A,
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
@@ -1382,14 +1390,17 @@ struct i40e_aqc_add_remove_cloud_filters {
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT 0
#define I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_MASK (0x3FF << \
I40E_AQC_ADD_CLOUD_CMD_SEID_NUM_SHIFT)
- u8 reserved2[4];
+ u8 big_buffer_flag;
+#define I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER 1
+#define I40E_AQC_ADD_CLOUD_CMD_BB 1
+ u8 reserved2[3];
__le32 addr_high;
__le32 addr_low;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_add_remove_cloud_filters);
-struct i40e_aqc_add_remove_cloud_filters_element_data {
+struct i40e_aqc_cloud_filters_element_data {
u8 outer_mac[6];
u8 inner_mac[6];
__le16 inner_vlan;
@@ -1401,13 +1412,16 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
struct {
u8 data[16];
} v6;
+ struct {
+ __le16 data[8];
+ } raw_v6;
} ipaddr;
__le16 flags;
#define I40E_AQC_ADD_CLOUD_FILTER_SHIFT 0
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0001 reserved */
/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
@@ -1419,6 +1433,13 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x000D reserved */
+/* 0x000E reserved */
+/* 0x000F reserved */
+/* 0x0010 to 0x0017 is for custom filters */
+#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
+#define I40E_AQC_ADD_CLOUD_FILTER_MAC_VLAN_PORT 0x0012 /* Dest MAC + VLAN + L4 Port */
#define I40E_AQC_ADD_CLOUD_FLAGS_TO_QUEUE 0x0080
#define I40E_AQC_ADD_CLOUD_VNK_SHIFT 6
@@ -1453,6 +1474,88 @@ struct i40e_aqc_add_remove_cloud_filters_element_data {
u8 response_reserved[7];
};
+/* i40e_aqc_add_rm_cloud_filt_elem_ext is used when
+ * I40E_AQC_ADD_REM_CLOUD_CMD_BIG_BUFFER flag is set.
+ */
+struct i40e_aqc_add_rm_cloud_filt_elem_ext {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_cloud_filters_element_data);
+
+/* i40e_aqc_cloud_filters_element_bb is used when
+ * I40E_AQC_CLOUD_CMD_BB flag is set.
+ */
+struct i40e_aqc_cloud_filters_element_bb {
+ struct i40e_aqc_cloud_filters_element_data element;
+ u16 general_fields[32];
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD0 0
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD1 1
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X10_WORD2 2
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD0 3
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD1 4
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X11_WORD2 5
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD0 6
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD1 7
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X12_WORD2 8
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD0 9
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD1 10
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X13_WORD2 11
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD0 12
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD1 13
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X14_WORD2 14
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD0 15
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD1 16
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD2 17
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD3 18
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD4 19
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD5 20
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD6 21
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X16_WORD7 22
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD0 23
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD1 24
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD2 25
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD3 26
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD4 27
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD5 28
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD6 29
+#define I40E_AQC_ADD_CLOUD_FV_FLU_0X17_WORD7 30
+};
+
+I40E_CHECK_STRUCT_LEN(0x80, i40e_aqc_cloud_filters_element_bb);
+
struct i40e_aqc_remove_cloud_filters_completion {
__le16 perfect_ovlan_used;
__le16 perfect_ovlan_free;
@@ -1464,6 +1567,61 @@ struct i40e_aqc_remove_cloud_filters_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_remove_cloud_filters_completion);
+/* Replace filter Command 0x025F
+ * uses the i40e_aqc_replace_cloud_filters,
+ * and the generic indirect completion structure
+ */
+struct i40e_filter_data {
+ u8 filter_type;
+ u8 input[3];
+};
+
+I40E_CHECK_STRUCT_LEN(4, i40e_filter_data);
+
+struct i40e_aqc_replace_cloud_filters_cmd {
+ u8 valid_flags;
+#define I40E_AQC_REPLACE_L1_FILTER 0x0
+#define I40E_AQC_REPLACE_CLOUD_FILTER 0x1
+#define I40E_AQC_GET_CLOUD_FILTERS 0x2
+#define I40E_AQC_MIRROR_CLOUD_FILTER 0x4
+#define I40E_AQC_HIGH_PRIORITY_CLOUD_FILTER 0x8
+ u8 old_filter_type;
+ u8 new_filter_type;
+ u8 tr_bit;
+ u8 tr_bit2;
+ u8 reserved[3];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_replace_cloud_filters_cmd);
+
+struct i40e_aqc_replace_cloud_filters_cmd_buf {
+ u8 data[32];
+/* Filter type INPUT codes*/
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_ENTRIES_MAX 3
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_VALIDATED (1 << 7UL)
+
+/* Field Vector offsets */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA 0
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_ETH 6
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG 7
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_VLAN 8
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_OVLAN 9
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_STAG_IVLAN 10
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_TUNNLE_KEY 11
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IMAC 12
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_IP_DA 14
+/* big FLU */
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_OIP_DA 15
+
+#define I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_INNER_VLAN 37
+ struct i40e_filter_data filters[8];
+};
+
+I40E_CHECK_STRUCT_LEN(0x40, i40e_aqc_replace_cloud_filters_cmd_buf);
+
/* Add Mirror Rule (indirect or direct 0x0260)
* Delete Mirror Rule (indirect or direct 0x0261)
* note: some rule types (4,5) do not use an external buffer.
@@ -1865,6 +2023,7 @@ struct i40e_aq_get_phy_abilities_resp {
#define I40E_AQ_PHY_FEC_ABILITY_KR 0x40
#define I40E_AQ_PHY_FEC_ABILITY_RS 0x80
__le16 eee_capability;
+#define I40E_AQ_EEE_AUTO 0x0001
#define I40E_AQ_EEE_100BASE_TX 0x0002
#define I40E_AQ_EEE_1000BASE_T 0x0004
#define I40E_AQ_EEE_10GBASE_T 0x0008
@@ -1931,20 +2090,21 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
__le16 max_frame_size;
u8 params;
-#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
-#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
-#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
-#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
-#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
-#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
-#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
-#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
-#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_CRC_EN 0x04
+#define I40E_AQ_SET_MAC_CONFIG_PACING_MASK 0x78
+#define I40E_AQ_SET_MAC_CONFIG_PACING_SHIFT 3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_NONE 0x0
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1B_13TX 0xF
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_9TX 0x9
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_4TX 0x8
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_7TX 0x7
+#define I40E_AQ_SET_MAC_CONFIG_PACING_2DW_3TX 0x6
+#define I40E_AQ_SET_MAC_CONFIG_PACING_1DW_1TX 0x5
+#define I40E_AQ_SET_MAC_CONFIG_PACING_3DW_2TX 0x4
+#define I40E_AQ_SET_MAC_CONFIG_PACING_7DW_3TX 0x3
+#define I40E_AQ_SET_MAC_CONFIG_PACING_4DW_1TX 0x2
+#define I40E_AQ_SET_MAC_CONFIG_PACING_9DW_1TX 0x1
+#define I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN 0x80
u8 tx_timer_priority; /* bitmap */
__le16 tx_timer_value;
__le16 fc_refresh_threshold;
@@ -2077,8 +2237,8 @@ struct i40e_aqc_set_lb_mode {
#define I40E_AQ_LB_SERDES 2
#define I40E_AQ_LB_PHY_INT 3
#define I40E_AQ_LB_PHY_EXT 4
-#define I40E_AQ_LB_CPVL_PCS 5
-#define I40E_AQ_LB_CPVL_EXT 6
+#define I40E_AQ_LB_BASE_T_PCS 5
+#define I40E_AQ_LB_BASE_T_EXT 6
#define I40E_AQ_LB_PHY_LOCAL 0x01
#define I40E_AQ_LB_PHY_REMOTE 0x02
#define I40E_AQ_LB_MAC_LOCAL 0x04
@@ -2142,7 +2302,13 @@ struct i40e_aqc_phy_register_access {
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL 1
#define I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE 2
u8 dev_addres;
- u8 reserved1[2];
+ u8 cmd_flags;
+#define I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE 0x01
+#define I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER 0x02
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT 2
+#define I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK (0x3 << \
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT)
+ u8 reserved1;
__le32 reg_address;
__le32 reg_value;
u8 reserved2[4];
@@ -2157,6 +2323,8 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_phy_register_access);
struct i40e_aqc_nvm_update {
u8 command_flags;
#define I40E_AQ_NVM_LAST_CMD 0x01
+#define I40E_AQ_NVM_REARRANGE_TO_FLAT 0x20
+#define I40E_AQ_NVM_REARRANGE_TO_STRUCT 0x40
#define I40E_AQ_NVM_FLASH_ONLY 0x80
#define I40E_AQ_NVM_PRESERVATION_FLAGS_SHIFT 1
#define I40E_AQ_NVM_PRESERVATION_FLAGS_MASK 0x03
@@ -2404,18 +2572,19 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_update_tlv);
/* Stop LLDP (direct 0x0A05) */
struct i40e_aqc_lldp_stop {
u8 command;
-#define I40E_AQ_LLDP_AGENT_STOP 0x0
-#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP 0x0
+#define I40E_AQ_LLDP_AGENT_SHUTDOWN 0x1
+#define I40E_AQ_LLDP_AGENT_STOP_PERSIST 0x2
u8 reserved[15];
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop);
/* Start LLDP (direct 0x0A06) */
-
struct i40e_aqc_lldp_start {
u8 command;
-#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START 0x1
+#define I40E_AQ_LLDP_AGENT_START_PERSIST 0x2
u8 reserved[15];
};
@@ -2535,6 +2704,16 @@ struct i40e_aqc_lldp_stop_start_specific_agent {
I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_stop_start_specific_agent);
+/* Restore LLDP Agent factory settings (direct 0x0A0A) */
+struct i40e_aqc_lldp_restore {
+ u8 command;
+#define I40E_AQ_LLDP_AGENT_RESTORE_NOT 0x0
+#define I40E_AQ_LLDP_AGENT_RESTORE 0x1
+ u8 reserved[15];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_lldp_restore);
+
/* Add Udp Tunnel command and completion (direct 0x0B00) */
struct i40e_aqc_add_udp_tunnel {
__le16 udp_port;
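[Editor's note] The replace-filter command (0x025F) added above pairs the
direct i40e_aqc_replace_cloud_filters_cmd descriptor with the 0x40-byte
indirect buffer, whose 32-byte data area decodes into eight 4-byte
i40e_filter_data entries (one filter_type byte plus three input bytes), which
is exactly the unpacking loop i40e_aq_replace_cloud_filters() performs in
i40e_common.c below. A hedged sketch of issuing a replacement, assuming an
initialized struct i40e_hw *hw; the specific filter-type values are
illustrative only:

struct i40e_aqc_replace_cloud_filters_cmd filter_cmd = {0};
struct i40e_aqc_replace_cloud_filters_cmd_buf buf = {0};
enum i40e_status_code status;

/* Swap an IMAC cloud filter for a custom type from the 0x10-0x17
 * range; both type values here are placeholders. */
filter_cmd.valid_flags = I40E_AQC_REPLACE_CLOUD_FILTER;
filter_cmd.old_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IMAC;
filter_cmd.new_filter_type = I40E_AQC_ADD_CLOUD_FILTER_IP_PORT;

/* One input entry keyed on the destination MAC field vector. */
buf.filters[0].filter_type = I40E_AQC_REPLACE_CLOUD_CMD_INPUT_FV_MAC_DA;

status = i40e_aq_replace_cloud_filters(hw, &filter_cmd, &buf);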
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index f5dfdde61ad2..f3744be7b637 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -66,6 +66,8 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_20G_KR2_A:
case I40E_DEV_ID_25G_B:
case I40E_DEV_ID_25G_SFP28:
+ case I40E_DEV_ID_X710_N3000:
+ case I40E_DEV_ID_XXV710_N3000:
hw->mac.type = I40E_MAC_XL710;
break;
case I40E_DEV_ID_KX_X722:
@@ -319,32 +321,37 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
void *buffer, u16 buf_len)
{
struct i40e_aq_desc *aq_desc = (struct i40e_aq_desc *)desc;
+ u32 effective_mask = hw->debug_mask & mask;
u8 *buf = (u8 *)buffer;
u16 len;
- u16 i = 0;
+ u16 i;
- if ((!(mask & hw->debug_mask)) || (desc == NULL))
+ if (!effective_mask || !desc)
return;
len = LE16_TO_CPU(aq_desc->datalen);
- i40e_debug(hw, mask,
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
"AQ CMD: opcode 0x%04X, flags 0x%04X, datalen 0x%04X, retval 0x%04X\n",
LE16_TO_CPU(aq_desc->opcode),
LE16_TO_CPU(aq_desc->flags),
LE16_TO_CPU(aq_desc->datalen),
LE16_TO_CPU(aq_desc->retval));
- i40e_debug(hw, mask, "\tcookie (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tcookie (h,l) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->cookie_high),
LE32_TO_CPU(aq_desc->cookie_low));
- i40e_debug(hw, mask, "\tparam (0,1) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\tparam (0,1) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->params.internal.param0),
LE32_TO_CPU(aq_desc->params.internal.param1));
- i40e_debug(hw, mask, "\taddr (h,l) 0x%08X 0x%08X\n",
+ i40e_debug(hw, mask & I40E_DEBUG_AQ_DESCRIPTOR,
+ "\taddr (h,l) 0x%08X 0x%08X\n",
LE32_TO_CPU(aq_desc->params.external.addr_high),
LE32_TO_CPU(aq_desc->params.external.addr_low));
- if ((buffer != NULL) && (aq_desc->datalen != 0)) {
+ if (buffer && (buf_len != 0) && (len != 0) &&
+ (effective_mask & I40E_DEBUG_AQ_DESC_BUFFER)) {
i40e_debug(hw, mask, "AQ CMD Buffer:\n");
if (buf_len < len)
len = buf_len;
@@ -1011,9 +1018,17 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
- if (hw->mac.type == I40E_MAC_X722)
- hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE |
- I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK;
+ /* NVMUpdate features structure initialization */
+ hw->nvmupd_features.major = I40E_NVMUPD_FEATURES_API_VER_MAJOR;
+ hw->nvmupd_features.minor = I40E_NVMUPD_FEATURES_API_VER_MINOR;
+ hw->nvmupd_features.size = sizeof(hw->nvmupd_features);
+ i40e_memset(hw->nvmupd_features.features, 0x0,
+ I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN *
+ sizeof(*hw->nvmupd_features.features),
+ I40E_NONDMA_MEM);
+
+ /* No features supported at the moment */
+ hw->nvmupd_features.features[0] = 0;
status = i40e_init_nvm(hw);
return status;
@@ -1272,6 +1287,29 @@ static enum i40e_media_type i40e_get_media_type(struct i40e_hw *hw)
return media;
}
+/**
+ * i40e_poll_globr - Poll for Global Reset completion
+ * @hw: pointer to the hardware structure
+ * @retry_limit: how many times to retry before failure
+ **/
+static enum i40e_status_code i40e_poll_globr(struct i40e_hw *hw,
+ u32 retry_limit)
+{
+ u32 cnt, reg = 0;
+
+ for (cnt = 0; cnt < retry_limit; cnt++) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT);
+ if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
+ return I40E_SUCCESS;
+ i40e_msec_delay(100);
+ }
+
+ DEBUGOUT("Global reset failed.\n");
+ DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg);
+
+ return I40E_ERR_RESET_FAILED;
+}
+
#define I40E_PF_RESET_WAIT_COUNT 200
/**
* i40e_pf_reset - Reset the PF
@@ -1295,7 +1333,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- grst_del = grst_del * 20;
+ grst_del = min(grst_del * 20, 160U);
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1341,14 +1379,14 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
if (!(reg & I40E_PFGEN_CTRL_PFSWR_MASK))
break;
reg2 = rd32(hw, I40E_GLGEN_RSTAT);
- if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
- DEBUGOUT("Core reset upcoming. Skipping PF reset request.\n");
- DEBUGOUT1("I40E_GLGEN_RSTAT = 0x%x\n", reg2);
- return I40E_ERR_NOT_READY;
- }
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK)
+ break;
i40e_msec_delay(1);
}
- if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
+ if (reg2 & I40E_GLGEN_RSTAT_DEVSTATE_MASK) {
+ if (i40e_poll_globr(hw, grst_del) != I40E_SUCCESS)
+ return I40E_ERR_RESET_FAILED;
+ } else if (reg & I40E_PFGEN_CTRL_PFSWR_MASK) {
DEBUGOUT("PF reset polling failed to complete.\n");
return I40E_ERR_RESET_FAILED;
}
@@ -1480,7 +1518,6 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
if (!hw->func_caps.led[idx])
return 0;
-
gpio_val = rd32(hw, I40E_GLGEN_GPIO_CTL(idx));
port = (gpio_val & I40E_GLGEN_GPIO_CTL_PRT_NUM_MASK) >>
I40E_GLGEN_GPIO_CTL_PRT_NUM_SHIFT;
@@ -1499,8 +1536,15 @@ static u32 i40e_led_is_mine(struct i40e_hw *hw, int idx)
#define I40E_FILTER_ACTIVITY 0xE
#define I40E_LINK_ACTIVITY 0xC
#define I40E_MAC_ACTIVITY 0xD
+#define I40E_FW_LED BIT(4)
+#define I40E_LED_MODE_VALID (I40E_GLGEN_GPIO_CTL_LED_MODE_MASK >> \
+ I40E_GLGEN_GPIO_CTL_LED_MODE_SHIFT)
+
#define I40E_LED0 22
+#define I40E_PIN_FUNC_SDP 0x0
+#define I40E_PIN_FUNC_LED 0x1
+
/**
* i40e_led_get - return current on/off mode
* @hw: pointer to the hw struct
@@ -1562,8 +1606,10 @@ void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink)
u32 current_mode = 0;
int i;
- if (mode & 0xfffffff0)
+ if (mode & ~I40E_LED_MODE_VALID) {
DEBUGOUT1("invalid mode passed in %X\n", mode);
+ return;
+ }
/* as per the documentation GPIO 22-29 are the LED
* GPIO pins named LED0..LED7
@@ -1648,19 +1694,22 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
status = i40e_asq_send_command(hw, &desc, abilities,
abilities_size, cmd_details);
- if (status != I40E_SUCCESS)
- break;
-
- if (hw->aq.asq_last_status == I40E_AQ_RC_EIO) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EIO:
status = I40E_ERR_UNKNOWN_PHY;
break;
- } else if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) {
+ case I40E_AQ_RC_EAGAIN:
i40e_msec_delay(1);
total_delay++;
status = I40E_ERR_TIMEOUT;
+ break;
+ /* also covers I40E_AQ_RC_OK */
+ default:
+ break;
}
- } while ((hw->aq.asq_last_status != I40E_AQ_RC_OK) &&
- (total_delay < max_delay));
+
+ } while ((hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN) &&
+ (total_delay < max_delay));
if (status != I40E_SUCCESS)
return status;
@@ -1803,6 +1852,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
* @max_frame_size: Maximum Frame Size to be supported by the port
* @crc_en: Tell HW to append a CRC to outgoing frames
* @pacing: Pacing configurations
+ * @auto_drop_blocking_packets: Tell HW to drop packets if TC queue is blocked
* @cmd_details: pointer to command details structure or NULL
*
* Configure MAC settings for frame size, jumbo frame support and the
@@ -1811,6 +1861,7 @@ enum i40e_status_code i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
u16 max_frame_size,
bool crc_en, u16 pacing,
+ bool auto_drop_blocking_packets,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -1829,6 +1880,19 @@ enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
if (crc_en)
cmd->params |= I40E_AQ_SET_MAC_CONFIG_CRC_EN;
+ if (auto_drop_blocking_packets) {
+ if (hw->flags & I40E_HW_FLAG_DROP_MODE)
+ cmd->params |=
+ I40E_AQ_SET_MAC_CONFIG_DROP_BLOCKING_PACKET_EN;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "This FW api version does not support drop mode.\n");
+ }
+
+#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD 0x7FFF
+ cmd->fc_refresh_threshold =
+ CPU_TO_LE16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -1969,8 +2033,8 @@ enum i40e_status_code i40e_aq_get_link_info(struct i40e_hw *hw,
hw->aq.fw_min_ver < 40)) && hw_link_info->phy_type == 0xE)
hw_link_info->phy_type = I40E_PHY_TYPE_10GBASE_SFPP_CU;
- if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
- hw->aq.api_min_ver >= 7) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE &&
+ hw->mac.type != I40E_MAC_X722) {
__le32 tmp;
i40e_memcpy(&tmp, resp->link_type, sizeof(tmp),
@@ -2198,7 +2262,7 @@ enum i40e_status_code i40e_aq_add_vsi(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ sizeof(vsi_ctx->info), cmd_details);
if (status != I40E_SUCCESS)
goto aq_add_vsi_exit;
@@ -2615,7 +2679,7 @@ enum i40e_status_code i40e_aq_update_vsi_params(struct i40e_hw *hw,
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
status = i40e_asq_send_command(hw, &desc, &vsi_ctx->info,
- sizeof(vsi_ctx->info), cmd_details);
+ sizeof(vsi_ctx->info), cmd_details);
vsi_ctx->vsis_allocated = LE16_TO_CPU(resp->vsi_used);
vsi_ctx->vsis_unallocated = LE16_TO_CPU(resp->vsi_free);
@@ -2830,9 +2894,16 @@ enum i40e_status_code i40e_update_link_info(struct i40e_hw *hw)
if (status)
return status;
- hw->phy.link_info.req_fec_info =
- abilities.fec_cfg_curr_mod_ext_info &
- (I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS);
+ if (abilities.fec_cfg_curr_mod_ext_info &
+ I40E_AQ_ENABLE_FEC_AUTO)
+ hw->phy.link_info.req_fec_info =
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
+ else
+ hw->phy.link_info.req_fec_info =
+ abilities.fec_cfg_curr_mod_ext_info &
+ (I40E_AQ_REQUEST_FEC_KR |
+ I40E_AQ_REQUEST_FEC_RS);
i40e_memcpy(hw->phy.link_info.module_type, &abilities.module_type,
sizeof(hw->phy.link_info.module_type), I40E_NONDMA_TO_NONDMA);
@@ -4209,7 +4280,7 @@ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
cmd->type = mib_type;
cmd->length = CPU_TO_LE16(buff_size);
- cmd->address_high = CPU_TO_LE32(I40E_HI_WORD((u64)buff));
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)buff));
cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)buff));
status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
@@ -4245,151 +4316,39 @@ enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
}
/**
- * i40e_aq_add_lldp_tlv
+ * i40e_aq_restore_lldp
* @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to add
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be added
- * @mib_len: length of the LLDP MIB returned in response
+ * @setting: pointer to factory setting variable or NULL
+ * @restore: True if factory settings should be restored
* @cmd_details: pointer to command details structure or NULL
*
- * Add the specified TLV to LLDP Local MIB for the given bridge type,
- * it is responsibility of the caller to make sure that the TLV is not
- * already present in the LLDPDU.
- * In return firmware will write the complete LLDP MIB with the newly
- * added TLV in the response buffer.
+ * Restore LLDP Agent factory settings if @restore is set to True. Otherwise,
+ * only return the factory setting in the AQ response.
**/
-enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- enum i40e_status_code status;
-
- if (buff_size == 0 || !buff || tlv_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_add_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->len = CPU_TO_LE16(tlv_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
-
- return status;
-}
-
-/**
- * i40e_aq_update_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: buffer with TLV to update
- * @buff_size: size of the buffer holding original and updated TLVs
- * @old_len: Length of the Original TLV
- * @new_len: Length of the Updated TLV
- * @offset: offset of the updated TLV in the buff
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Update the specified TLV to the LLDP Local MIB for the given bridge type.
- * Firmware will place the complete LLDP MIB in response buffer with the
- * updated TLV.
- **/
-enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_update_tlv *cmd =
- (struct i40e_aqc_lldp_update_tlv *)&desc.params.raw;
+ struct i40e_aqc_lldp_restore *cmd =
+ (struct i40e_aqc_lldp_restore *)&desc.params.raw;
enum i40e_status_code status;
- if (buff_size == 0 || !buff || offset == 0 ||
- old_len == 0 || new_len == 0)
- return I40E_ERR_PARAM;
-
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_update_tlv);
-
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
-
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
- cmd->old_len = CPU_TO_LE16(old_len);
- cmd->new_offset = CPU_TO_LE16(offset);
- cmd->new_len = CPU_TO_LE16(new_len);
-
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Restore LLDP not supported by current FW version.\n");
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
}
- return status;
-}
-
-/**
- * i40e_aq_delete_lldp_tlv
- * @hw: pointer to the hw struct
- * @bridge_type: type of bridge
- * @buff: pointer to a user supplied buffer that has the TLV
- * @buff_size: length of the buffer
- * @tlv_len: length of the TLV to be deleted
- * @mib_len: length of the returned LLDP MIB
- * @cmd_details: pointer to command details structure or NULL
- *
- * Delete the specified TLV from LLDP Local MIB for the given bridge type.
- * The firmware places the entire LLDP MIB in the response buffer.
- **/
-enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aqc_lldp_add_tlv *cmd =
- (struct i40e_aqc_lldp_add_tlv *)&desc.params.raw;
- enum i40e_status_code status;
-
- if (buff_size == 0 || !buff)
- return I40E_ERR_PARAM;
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_restore);
- i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_delete_tlv);
+ if (restore)
+ cmd->command |= I40E_AQ_LLDP_AGENT_RESTORE;
- /* Indirect Command */
- desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (buff_size > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
- desc.datalen = CPU_TO_LE16(buff_size);
- cmd->len = CPU_TO_LE16(tlv_len);
- cmd->type = ((bridge_type << I40E_AQ_LLDP_BRIDGE_TYPE_SHIFT) &
- I40E_AQ_LLDP_BRIDGE_TYPE_MASK);
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
- status = i40e_asq_send_command(hw, &desc, buff, buff_size, cmd_details);
- if (!status) {
- if (mib_len != NULL)
- *mib_len = LE16_TO_CPU(desc.datalen);
- }
+ if (setting)
+ *setting = cmd->command & 1;
return status;
}
@@ -4398,11 +4357,13 @@ enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
* i40e_aq_stop_lldp
* @hw: pointer to the hw struct
* @shutdown_agent: True if LLDP Agent needs to be Shutdown
+ * @persist: True if stop of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Stop or Shutdown the embedded LLDP Agent
**/
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -4415,6 +4376,14 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
if (shutdown_agent)
cmd->command |= I40E_AQ_LLDP_AGENT_SHUTDOWN;
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_STOP_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Stop LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4423,11 +4392,13 @@ enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
/**
* i40e_aq_start_lldp
* @hw: pointer to the hw struct
+ * @persist: True if start of LLDP should be persistent across power cycles
* @cmd_details: pointer to command details structure or NULL
*
* Start the embedded LLDP Agent on all ports.
**/
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
@@ -4438,6 +4409,15 @@ enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_lldp_start);
cmd->command = I40E_AQ_LLDP_AGENT_START;
+
+ if (persist) {
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT)
+ cmd->command |= I40E_AQ_LLDP_AGENT_START_PERSIST;
+ else
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Persistent Start LLDP not supported by current FW version.\n");
+ }
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4459,9 +4439,7 @@ i40e_aq_set_dcb_parameters(struct i40e_hw *hw, bool dcb_enable,
(struct i40e_aqc_set_dcb_parameters *)&desc.params.raw;
enum i40e_status_code status;
- if ((hw->mac.type != I40E_MAC_XL710) ||
- ((hw->aq.api_maj_ver < 1) ||
- ((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 6))))
+ if (!(hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE))
return I40E_ERR_DEVICE_NOT_SUPPORTED;
i40e_fill_default_direct_cmd_desc(&desc,
@@ -4655,7 +4633,6 @@ enum i40e_status_code i40e_aq_delete_element(struct i40e_hw *hw, u16 seid,
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_delete_element);
cmd->seid = CPU_TO_LE16(seid);
-
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
@@ -4836,8 +4813,6 @@ enum i40e_status_code i40e_aq_add_mcast_etag(struct i40e_hw *hw, u16 pv_seid,
cmd->num_unicast_etags = num_tags_in_buf;
desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
- if (length > I40E_AQ_LARGE_BUF)
- desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_LB);
status = i40e_asq_send_command(hw, &desc, buf, length, cmd_details);
@@ -5634,10 +5609,10 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
* to be shifted 1 byte over from the VxLAN VNI
**/
static void i40e_fix_up_geneve_vni(
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
- struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ struct i40e_aqc_cloud_filters_element_data *f = filters;
int i;
for (i = 0; i < filter_count; i++) {
@@ -5662,13 +5637,13 @@ static void i40e_fix_up_geneve_vni(
* @filter_count: number of filters contained in the buffer
*
* Set the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_remove_cloud_filters_element_data are filled
+ * i40e_aqc_cloud_filters_element_data are filled
* in by the caller of the function.
*
**/
enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ struct i40e_aqc_cloud_filters_element_data *filters,
u8 filter_count)
{
struct i40e_aq_desc desc;
@@ -5694,21 +5669,78 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
}
/**
- * i40e_aq_remove_cloud_filters
+ * i40e_aq_add_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to add cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be added
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Set the cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_add_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one more byte further than normally used for Tenant ID in
+ * other tunnel types.
+ */
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_rem_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to remove cloud filters from
* @filters: Buffer which contains the filters to be removed
* @filter_count: number of filters contained in the buffer
*
* Remove the cloud filters for a given VSI. The contents of the
- * i40e_aqc_add_remove_cloud_filters_element_data are filled
- * in by the caller of the function.
+ * i40e_aqc_cloud_filters_element_data are filled in by the caller
+ * of the function.
*
**/
-enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 seid,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count)
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count)
{
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
@@ -5733,6 +5765,115 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
}
/**
+ * i40e_aq_rem_cloud_filters_bb
+ * @hw: pointer to the hardware structure
+ * @seid: VSI seid to remove cloud filters from
+ * @filters: Buffer which contains the filters in big buffer to be removed
+ * @filter_count: number of filters contained in the buffer
+ *
+ * Remove the big buffer cloud filters for a given VSI. The contents of the
+ * i40e_aqc_cloud_filters_element_bb are filled in by the caller of the
+ * function.
+ *
+ **/
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_add_remove_cloud_filters *cmd =
+ (struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 buff_len;
+ int i;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_remove_cloud_filters);
+
+ buff_len = filter_count * sizeof(*filters);
+ desc.datalen = CPU_TO_LE16(buff_len);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->num_filters = filter_count;
+ cmd->seid = CPU_TO_LE16(seid);
+ cmd->big_buffer_flag = I40E_AQC_ADD_CLOUD_CMD_BB;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(filters[i].element.flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+
+ /* Due to hardware eccentricities, the VNI for Geneve is shifted
+ * one more byte further than normally used for Tenant ID in
+ * other tunnel types.
+ */
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
+ ti = LE32_TO_CPU(filters[i].element.tenant_id);
+ filters[i].element.tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+
+ status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_replace_cloud_filters - Replace cloud filter command
+ * @hw: pointer to the hw struct
+ * @filters: pointer to the i40e_aqc_replace_cloud_filter_cmd struct
+ * @cmd_buf: pointer to the i40e_aqc_replace_cloud_filter_cmd_buf struct
+ *
+ **/
+enum
+i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_replace_cloud_filters_cmd *cmd =
+ (struct i40e_aqc_replace_cloud_filters_cmd *)&desc.params.raw;
+ enum i40e_status_code status = I40E_SUCCESS;
+ int i = 0;
+
+ /* X722 doesn't support this command */
+ if (hw->mac.type == I40E_MAC_X722)
+ return I40E_ERR_DEVICE_NOT_SUPPORTED;
+
+ /* need FW version greater than 6.00 */
+ if (hw->aq.fw_maj_ver < 6)
+ return I40E_NOT_SUPPORTED;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_replace_cloud_filters);
+
+ desc.datalen = CPU_TO_LE16(32);
+ desc.flags |= CPU_TO_LE16((u16)(I40E_AQ_FLAG_BUF | I40E_AQ_FLAG_RD));
+ cmd->old_filter_type = filters->old_filter_type;
+ cmd->new_filter_type = filters->new_filter_type;
+ cmd->valid_flags = filters->valid_flags;
+ cmd->tr_bit = filters->tr_bit;
+ cmd->tr_bit2 = filters->tr_bit2;
+
+ status = i40e_asq_send_command(hw, &desc, cmd_buf,
+ sizeof(struct i40e_aqc_replace_cloud_filters_cmd_buf), NULL);
+
+ /* for get cloud filters command */
+ for (i = 0; i < 32; i += 4) {
+ cmd_buf->filters[i / 4].filter_type = cmd_buf->data[i];
+ cmd_buf->filters[i / 4].input[0] = cmd_buf->data[i + 1];
+ cmd_buf->filters[i / 4].input[1] = cmd_buf->data[i + 2];
+ cmd_buf->filters[i / 4].input[2] = cmd_buf->data[i + 3];
+ }
+
+ return status;
+}
+
+
+/**
* i40e_aq_alternate_write
* @hw: pointer to the hardware structure
* @reg_addr0: address of first dword to be read
@@ -6554,8 +6695,8 @@ phy_blinking_end:
* @led_addr: LED register address
* @reg_val: read register value
**/
-static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
- u32 *reg_val)
+enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val)
{
enum i40e_status_code status;
u8 phy_addr = 0;
@@ -6564,7 +6705,7 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6583,8 +6724,8 @@ static enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
* @led_addr: LED register address
* @reg_val: register value to write
**/
-static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
- u32 reg_val)
+enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val)
{
enum i40e_status_code status;
u8 phy_addr = 0;
@@ -6592,7 +6733,7 @@ static enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
reg_val, NULL);
} else {
@@ -6626,7 +6767,7 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL,
- I40E_PHY_COM_REG_PAGE,
+ I40E_PHY_COM_REG_PAGE, TRUE,
I40E_PHY_LED_PROV_REG_1,
&reg_val_aq, NULL);
if (status == I40E_SUCCESS)
@@ -6827,20 +6968,51 @@ do_retry:
}
/**
- * i40e_aq_set_phy_register
+ * i40e_mdio_if_number_selection - MDIO I/F number selection
+ * @hw: pointer to the hw struct
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
+ * @cmd: pointer to PHY Register command structure
+ **/
+static void
+i40e_mdio_if_number_selection(struct i40e_hw *hw, bool set_mdio, u8 mdio_num,
+ struct i40e_aqc_phy_register_access *cmd)
+{
+ if (set_mdio && cmd->phy_interface == I40E_AQ_PHY_REG_ACCESS_EXTERNAL) {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED)
+ cmd->cmd_flags |=
+ I40E_AQ_PHY_REG_ACCESS_SET_MDIO_IF_NUMBER |
+ ((mdio_num <<
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_SHIFT) &
+ I40E_AQ_PHY_REG_ACCESS_MDIO_IF_NUMBER_MASK);
+ else
+ i40e_debug(hw, I40E_DEBUG_PHY,
+ "MDIO I/F number selection not supported by current FW version.\n");
+ }
+}
+
+/**
+ * i40e_aq_set_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: new register value
* @cmd_details: pointer to command details structure or NULL
*
* Write the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_set_phy_register.
**/
-enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -6855,26 +7027,38 @@ enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
cmd->reg_address = CPU_TO_LE32(reg_addr);
cmd->reg_value = CPU_TO_LE32(reg_val);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
- * i40e_aq_get_phy_register
+ * i40e_aq_get_phy_register_ext
* @hw: pointer to the hw struct
* @phy_select: select which phy should be accessed
* @dev_addr: PHY device address
+ * @page_change: enable auto page change
+ * @set_mdio: use MDIO I/F number specified by mdio_num
+ * @mdio_num: MDIO I/F number
* @reg_addr: PHY register address
* @reg_val: read register value
* @cmd_details: pointer to command details structure or NULL
*
* Read the external PHY register.
+ * NOTE: In common cases the MDIO I/F number should not be changed; that's why
+ * you may use the simple wrapper i40e_aq_get_phy_register.
**/
-enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details)
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_phy_register_access *cmd =
@@ -6888,6 +7072,11 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
cmd->dev_addres = dev_addr;
cmd->reg_address = CPU_TO_LE32(reg_addr);
+ if (!page_change)
+ cmd->cmd_flags = I40E_AQ_PHY_REG_ACCESS_DONT_CHANGE_QSFP_PAGE;
+
+ i40e_mdio_if_number_selection(hw, set_mdio, mdio_num, cmd);
+
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (!status)
*reg_val = LE32_TO_CPU(cmd->reg_value);
@@ -6895,7 +7084,6 @@ enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
return status;
}
-
/**
* i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure
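[Editor's note] The i40e_common.c changes above add persist parameters to the
LLDP agent start/stop AQ commands and introduce i40e_aq_restore_lldp(). A
minimal usage sketch, assuming an initialized struct i40e_hw *hw and only the
signatures introduced in this commit; on firmware without
I40E_HW_FLAG_FW_LLDP_PERSISTENT the persist bit is logged and ignored rather
than treated as an error:

enum i40e_status_code status;
u8 factory_setting;

/* Stop the FW LLDP agent and keep it stopped across power cycles
 * (the persist bit only takes effect with API >= 1.8 firmware). */
status = i40e_aq_stop_lldp(hw, TRUE, TRUE, NULL);

/* Later, restart it persistently... */
status = i40e_aq_start_lldp(hw, TRUE, NULL);

/* ...or query/restore the factory LLDP configuration. */
status = i40e_aq_restore_lldp(hw, &factory_setting, TRUE, NULL);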
diff --git a/sys/dev/ixl/i40e_dcb.c b/sys/dev/ixl/i40e_dcb.c
index 9a940d2659f7..b2f0b5c0acad 100644
--- a/sys/dev/ixl/i40e_dcb.c
+++ b/sys/dev/ixl/i40e_dcb.c
@@ -893,22 +893,41 @@ out:
/**
* i40e_init_dcb
* @hw: pointer to the hw struct
+ * @enable_mib_change: enable mib change event
*
* Update DCB configuration from the Firmware
**/
-enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw, bool enable_mib_change)
{
enum i40e_status_code ret = I40E_SUCCESS;
struct i40e_lldp_variables lldp_cfg;
u8 adminstatus = 0;
if (!hw->func_caps.dcb)
- return ret;
+ return I40E_NOT_SUPPORTED;
/* Read LLDP NVM area */
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ if (hw->flags & I40E_HW_FLAG_FW_LLDP_PERSISTENT) {
+ u8 offset = 0;
+
+ if (hw->mac.type == I40E_MAC_XL710)
+ offset = I40E_LLDP_CURRENT_STATUS_XL710_OFFSET;
+ else if (hw->mac.type == I40E_MAC_X722)
+ offset = I40E_LLDP_CURRENT_STATUS_X722_OFFSET;
+ else
+ return I40E_NOT_SUPPORTED;
+
+ ret = i40e_read_nvm_module_data(hw,
+ I40E_SR_EMP_SR_SETTINGS_PTR,
+ offset,
+ I40E_LLDP_CURRENT_STATUS_OFFSET,
+ I40E_LLDP_CURRENT_STATUS_SIZE,
+ &lldp_cfg.adminstatus);
+ } else {
+ ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
+ }
if (ret)
- return ret;
+ return I40E_ERR_NOT_READY;
/* Get the LLDP AdminStatus for the current port */
adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
@@ -917,7 +936,7 @@ enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
/* LLDP agent disabled */
if (!adminstatus) {
hw->dcbx_status = I40E_DCBX_STATUS_DISABLED;
- return ret;
+ return I40E_ERR_NOT_READY;
}
/* Get DCBX status */
@@ -926,30 +945,67 @@ enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw)
return ret;
/* Check the DCBX Status */
- switch (hw->dcbx_status) {
- case I40E_DCBX_STATUS_DONE:
- case I40E_DCBX_STATUS_IN_PROGRESS:
+ if (hw->dcbx_status == I40E_DCBX_STATUS_DONE ||
+ hw->dcbx_status == I40E_DCBX_STATUS_IN_PROGRESS) {
/* Get current DCBX configuration */
ret = i40e_get_dcb_config(hw);
if (ret)
return ret;
- break;
- case I40E_DCBX_STATUS_DISABLED:
- return ret;
- case I40E_DCBX_STATUS_NOT_STARTED:
- case I40E_DCBX_STATUS_MULTIPLE_PEERS:
- default:
- break;
+ } else if (hw->dcbx_status == I40E_DCBX_STATUS_DISABLED) {
+ return I40E_ERR_NOT_READY;
}
/* Configure the LLDP MIB change event */
- ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+ if (enable_mib_change)
+ ret = i40e_aq_cfg_lldp_mib_change_event(hw, TRUE, NULL);
+
+ return ret;
+}
+
+/**
+ * i40e_get_fw_lldp_status
+ * @hw: pointer to the hw struct
+ * @lldp_status: pointer to the status enum
+ *
+ * Get status of FW Link Layer Discovery Protocol (LLDP) Agent.
+ * Status of agent is reported via @lldp_status parameter.
+ **/
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status)
+{
+ enum i40e_status_code ret;
+ struct i40e_virt_mem mem;
+ u8 *lldpmib;
+
+ if (!lldp_status)
+ return I40E_ERR_PARAM;
+
+ /* Allocate buffer for the LLDPDU */
+ ret = i40e_allocate_virt_mem(hw, &mem, I40E_LLDPDU_SIZE);
if (ret)
return ret;
+ lldpmib = (u8 *)mem.va;
+ ret = i40e_aq_get_lldp_mib(hw, 0, 0, (void *)lldpmib,
+ I40E_LLDPDU_SIZE, NULL, NULL, NULL);
+
+ if (ret == I40E_SUCCESS) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_ENOENT) {
+ /* MIB is not available yet but the agent is running */
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_ENABLED;
+ ret = I40E_SUCCESS;
+ } else if (hw->aq.asq_last_status == I40E_AQ_RC_EPERM) {
+ *lldp_status = I40E_GET_FW_LLDP_STATUS_DISABLED;
+ ret = I40E_SUCCESS;
+ }
+
+ i40e_free_virt_mem(hw, &mem);
return ret;
}
+
/**
* i40e_add_ieee_ets_tlv - Prepare ETS TLV in IEEE format
* @tlv: Fill the ETS config data in IEEE format
@@ -1242,7 +1298,8 @@ enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw)
/**
* i40e_dcb_config_to_lldp - Convert Dcbconfig to MIB format
- * @hw: pointer to the hw struct
+ * @lldpmib: pointer to mib to be output
+ * @miblen: pointer to u16 for length of lldpmib
* @dcbcfg: store for LLDPDU data
*
 * Convert DCB configuration to MIB format for the FW
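The two routines above are meant to be used together: a driver can probe the FW LLDP agent, then pull the DCB configuration. A minimal sketch of one plausible call order, assuming a typical attach-time context (it mirrors the if_ixl.c hunks later in this diff):

    enum i40e_get_fw_lldp_status_resp lldp_status;
    enum i40e_status_code status;

    status = i40e_get_fw_lldp_status(hw, &lldp_status);
    if (status == I40E_SUCCESS &&
        lldp_status == I40E_GET_FW_LLDP_STATUS_ENABLED) {
        /* Agent running: fetch FW DCB config, no MIB change events */
        status = i40e_init_dcb(hw, FALSE);
    }

Note the changed contract: i40e_init_dcb() now returns I40E_NOT_SUPPORTED or I40E_ERR_NOT_READY where it used to return I40E_SUCCESS, so existing callers must not assume a zero return when DCB is disabled.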
diff --git a/sys/dev/ixl/i40e_dcb.h b/sys/dev/ixl/i40e_dcb.h
index e08474141489..2453d3a0f27d 100644
--- a/sys/dev/ixl/i40e_dcb.h
+++ b/sys/dev/ixl/i40e_dcb.h
@@ -69,6 +69,11 @@
#define I40E_LLDP_ADMINSTATUS_ENABLED_TX 2
#define I40E_LLDP_ADMINSTATUS_ENABLED_RXTX 3
+#define I40E_LLDP_CURRENT_STATUS_XL710_OFFSET 0x2B
+#define I40E_LLDP_CURRENT_STATUS_X722_OFFSET 0x31
+#define I40E_LLDP_CURRENT_STATUS_OFFSET 1
+#define I40E_LLDP_CURRENT_STATUS_SIZE 1
+
/* Defines for LLDP TLV header */
#define I40E_LLDP_MIB_HLEN 14
#define I40E_LLDP_TLV_LEN_SHIFT 0
@@ -208,6 +213,12 @@ struct i40e_dcbx_variables {
u32 deftsaassignment;
};
+
+enum i40e_get_fw_lldp_status_resp {
+ I40E_GET_FW_LLDP_STATUS_DISABLED = 0,
+ I40E_GET_FW_LLDP_STATUS_ENABLED = 1
+};
+
enum i40e_status_code i40e_get_dcbx_status(struct i40e_hw *hw,
u16 *status);
enum i40e_status_code i40e_lldp_to_dcb_config(u8 *lldpmib,
@@ -216,9 +227,12 @@ enum i40e_status_code i40e_aq_get_dcb_config(struct i40e_hw *hw, u8 mib_type,
u8 bridgetype,
struct i40e_dcbx_config *dcbcfg);
enum i40e_status_code i40e_get_dcb_config(struct i40e_hw *hw);
-enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw);
+enum i40e_status_code i40e_init_dcb(struct i40e_hw *hw,
+ bool enable_mib_change);
+enum i40e_status_code
+i40e_get_fw_lldp_status(struct i40e_hw *hw,
+ enum i40e_get_fw_lldp_status_resp *lldp_status);
enum i40e_status_code i40e_set_dcb_config(struct i40e_hw *hw);
enum i40e_status_code i40e_dcb_config_to_lldp(u8 *lldpmib, u16 *miblen,
struct i40e_dcbx_config *dcbcfg);
-
#endif /* _I40E_DCB_H_ */
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
index 61810257f01a..182f82ad749f 100644
--- a/sys/dev/ixl/i40e_devids.h
+++ b/sys/dev/ixl/i40e_devids.h
@@ -39,6 +39,8 @@
#define I40E_INTEL_VENDOR_ID 0x8086
/* Device IDs */
+#define I40E_DEV_ID_X710_N3000 0x0CF8
+#define I40E_DEV_ID_XXV710_N3000 0x0D58
#define I40E_DEV_ID_SFP_XL710 0x1572
#define I40E_DEV_ID_QEMU 0x1574
#define I40E_DEV_ID_KX_B 0x1580
diff --git a/sys/dev/ixl/i40e_lan_hmc.c b/sys/dev/ixl/i40e_lan_hmc.c
index 45f8fba6d6a8..c280393ef1c8 100644
--- a/sys/dev/ixl/i40e_lan_hmc.c
+++ b/sys/dev/ixl/i40e_lan_hmc.c
@@ -144,7 +144,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Tx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
txq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -167,7 +167,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: Rx context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
rxq_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -190,7 +190,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE context: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_cntx_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -213,7 +213,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
ret_code = I40E_ERR_INVALID_HMC_OBJ_COUNT;
DEBUGOUT3("i40e_init_lan_hmc: FCoE filter: asks for 0x%x but max allowed is 0x%x, returns error %d\n",
fcoe_filt_num, obj->max_cnt, ret_code);
- goto init_lan_hmc_out;
+ goto free_hmc_out;
}
/* aggregate values into the full LAN object for later */
@@ -234,7 +234,7 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
(sizeof(struct i40e_hmc_sd_entry) *
hw->hmc.sd_table.sd_cnt));
if (ret_code)
- goto init_lan_hmc_out;
+ goto free_hmc_out;
hw->hmc.sd_table.sd_entry =
(struct i40e_hmc_sd_entry *)hw->hmc.sd_table.addr.va;
}
@@ -243,6 +243,11 @@ enum i40e_status_code i40e_init_lan_hmc(struct i40e_hw *hw, u32 txq_num,
init_lan_hmc_out:
return ret_code;
+free_hmc_out:
+ if (hw->hmc.hmc_obj_virt_mem.va)
+ i40e_free_virt_mem(hw, &hw->hmc.hmc_obj_virt_mem);
+
+ return ret_code;
}
/**
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 6bcf51d0669a..20ba63a05b4e 100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -367,6 +367,77 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
}
/**
+ * i40e_read_nvm_module_data - Reads an NVM buffer into the specified memory location
+ * @hw: Pointer to the HW structure
+ * @module_ptr: Pointer to module in words with respect to NVM beginning
+ * @module_offset: Offset in words from module start
+ * @data_offset: Offset in words from reading data area start
+ * @words_data_size: Words to read from NVM
+ * @data_ptr: Pointer to memory location where resulting buffer will be stored
+ **/
+enum i40e_status_code
+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
+ u16 data_offset, u16 words_data_size, u16 *data_ptr)
+{
+ enum i40e_status_code status;
+ u16 specific_ptr = 0;
+ u16 ptr_value = 0;
+ u16 offset = 0;
+
+ if (module_ptr != 0) {
+ status = i40e_read_nvm_word(hw, module_ptr, &ptr_value);
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+ }
+#define I40E_NVM_INVALID_PTR_VAL 0x7FFF
+#define I40E_NVM_INVALID_VAL 0xFFFF
+
+ /* Pointer not initialized */
+ if (ptr_value == I40E_NVM_INVALID_PTR_VAL ||
+ ptr_value == I40E_NVM_INVALID_VAL) {
+ i40e_debug(hw, I40E_DEBUG_ALL, "Pointer not initialized.\n");
+ return I40E_ERR_BAD_PTR;
+ }
+
+ /* Check whether the module is in SR mapped area or outside */
+ if (ptr_value & I40E_PTR_TYPE) {
+ /* Pointer points outside of the Shared RAM mapped area */
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm data failed. Pointer points outside of the Shared RAM mapped area.\n");
+
+ return I40E_ERR_PARAM;
+ } else {
+ /* Read from the Shadow RAM */
+
+ status = i40e_read_nvm_word(hw, ptr_value + module_offset,
+ &specific_ptr);
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm word failed.Error code: %d.\n",
+ status);
+ return I40E_ERR_NVM;
+ }
+
+ offset = ptr_value + module_offset + specific_ptr +
+ data_offset;
+
+ status = i40e_read_nvm_buffer(hw, offset, &words_data_size,
+ data_ptr);
+ if (status != I40E_SUCCESS) {
+ i40e_debug(hw, I40E_DEBUG_ALL,
+ "Reading nvm buffer failed.Error code: %d.\n",
+ status);
+ }
+ }
+
+ return status;
+}
+
+/**
* i40e_read_nvm_buffer_srctl - Reads Shadow RAM buffer via SRCTL register
* @hw: pointer to the HW structure
* @offset: offset of the Shadow RAM word to read (0x000000 - 0x001FFF).
@@ -504,10 +575,10 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
} else {
ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
}
+
return ret_code;
}
-
/**
* i40e_write_nvm_aq - Writes Shadow RAM.
* @hw: pointer to the HW structure.
@@ -826,6 +897,7 @@ static const char *i40e_nvm_update_state_str[] = {
"I40E_NVMUPD_EXEC_AQ",
"I40E_NVMUPD_GET_AQ_RESULT",
"I40E_NVMUPD_GET_AQ_EVENT",
+ "I40E_NVMUPD_GET_FEATURES",
};
/**
@@ -888,6 +960,31 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
return I40E_SUCCESS;
}
+ /*
+ * A supported features request returns immediately
+ * rather than going into the state machine
+ */
+ if (upd_cmd == I40E_NVMUPD_FEATURES) {
+ if (cmd->data_size < hw->nvmupd_features.size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
+ /*
+ * If the buffer is bigger than the i40e_nvmupd_features structure,
+ * make sure the trailing bytes are set to 0x0.
+ */
+ if (cmd->data_size > hw->nvmupd_features.size)
+ i40e_memset(bytes + hw->nvmupd_features.size, 0x0,
+ cmd->data_size - hw->nvmupd_features.size,
+ I40E_NONDMA_MEM);
+
+ i40e_memcpy(bytes, &hw->nvmupd_features,
+ hw->nvmupd_features.size, I40E_NONDMA_MEM);
+
+ return I40E_SUCCESS;
+ }
+
/* Clear status even it is not read and log */
if (hw->nvmupd_state == I40E_NVMUPD_STATE_ERROR) {
i40e_debug(hw, I40E_DEBUG_NVM,
@@ -1354,10 +1451,20 @@ static enum i40e_nvmupd_cmd i40e_nvmupd_validate_command(struct i40e_hw *hw,
upd_cmd = I40E_NVMUPD_READ_SA;
break;
case I40E_NVM_EXEC:
- if (module == 0xf)
- upd_cmd = I40E_NVMUPD_STATUS;
- else if (module == 0)
+ switch (module) {
+ case I40E_NVM_EXEC_GET_AQ_RESULT:
upd_cmd = I40E_NVMUPD_GET_AQ_RESULT;
+ break;
+ case I40E_NVM_EXEC_FEATURES:
+ upd_cmd = I40E_NVMUPD_FEATURES;
+ break;
+ case I40E_NVM_EXEC_STATUS:
+ upd_cmd = I40E_NVMUPD_STATUS;
+ break;
+ default:
+ *perrno = -EFAULT;
+ return I40E_NVMUPD_INVALID;
+ }
break;
case I40E_NVM_AQE:
upd_cmd = I40E_NVMUPD_GET_AQ_EVENT;
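For a concrete picture of i40e_read_nvm_module_data() above: the lookup that i40e_init_dcb() now performs to find the persistent LLDP admin status boils down to the following (a sketch reusing only constants that appear in this diff):

    u16 lldp_status_word = 0;
    enum i40e_status_code status;

    status = i40e_read_nvm_module_data(hw,
        I40E_SR_EMP_SR_SETTINGS_PTR,            /* module_ptr */
        I40E_LLDP_CURRENT_STATUS_XL710_OFFSET,  /* module_offset */
        I40E_LLDP_CURRENT_STATUS_OFFSET,        /* data_offset */
        I40E_LLDP_CURRENT_STATUS_SIZE,          /* words to read */
        &lldp_status_word);

The helper chases two levels of indirection (the module pointer, then the module-specific pointer found at module_offset) before reading words_data_size words out of the Shadow RAM.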
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index ee8ca26c5d7a..df6848dff3f2 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -33,6 +33,7 @@
/*$FreeBSD$*/
#include <sys/limits.h>
+#include <sys/time.h>
#include "ixl.h"
@@ -45,14 +46,13 @@ i40e_dmamap_cb(void *arg, bus_dma_segment_t * segs, int nseg, int error)
if (error)
return;
*(bus_addr_t *) arg = segs->ds_addr;
- return;
}
i40e_status
i40e_allocate_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem, u32 size)
{
mem->va = malloc(size, M_DEVBUF, M_NOWAIT | M_ZERO);
- return(mem->va == NULL);
+ return (mem->va == NULL);
}
i40e_status
@@ -61,7 +61,7 @@ i40e_free_virt_mem(struct i40e_hw *hw, struct i40e_virt_mem *mem)
free(mem->va, M_DEVBUF);
mem->va = NULL;
- return(0);
+ return (I40E_SUCCESS);
}
i40e_status
@@ -113,7 +113,7 @@ i40e_allocate_dma_mem(struct i40e_hw *hw, struct i40e_dma_mem *mem,
mem->size = size;
bus_dmamap_sync(mem->tag, mem->map,
BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
- return (0);
+ return (I40E_SUCCESS);
fail_2:
bus_dmamem_free(mem->tag, mem->va, mem->map);
fail_1:
@@ -161,25 +161,15 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
mtx_destroy(&lock->mutex);
}
-static inline int
-ixl_ms_scale(int x)
-{
- if (hz == 1000)
- return (x);
- else if (hz > 1000)
- return (x*(hz/1000));
- else
- return (max(1, x/(1000/hz)));
-}
+#ifndef MSEC_2_TICKS
+#define MSEC_2_TICKS(m) max(1, (uint32_t)((hz == 1000) ? \
+ (m) : ((uint64_t)(m) * (uint64_t)hz)/(uint64_t)1000))
+#endif
void
i40e_msec_pause(int msecs)
{
- if (cold || SCHEDULER_STOPPED())
- i40e_msec_delay(msecs);
- else
- // ERJ: (msecs * hz) could overflow
- pause("ixl", ixl_ms_scale(msecs));
+ pause("i40e_msec_pause", MSEC_2_TICKS(msecs));
}
/*
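The MSEC_2_TICKS() macro above rewards a worked example; its 64-bit intermediate is what removes the "(msecs * hz) could overflow" concern noted in the deleted ixl_ms_scale() code. The hz values below are illustrative:

    /* Worked examples (m in milliseconds, hz is the kernel tick rate):
     *   hz = 1000, m = 50 -> 50 ticks (one tick per millisecond)
     *   hz = 100,  m = 50 -> (50 * 100) / 1000 = 5 ticks
     *   hz = 100,  m = 5  -> (5 * 100) / 1000 = 0, clamped to 1 by max()
     */
    pause("example", MSEC_2_TICKS(50));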
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index f63311b17590..51021ed28e6a 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -100,7 +100,10 @@ enum i40e_status_code i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
-
+enum i40e_status_code i40e_led_get_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 *reg_val);
+enum i40e_status_code i40e_led_set_reg(struct i40e_hw *hw, u16 led_addr,
+ u32 reg_val);
/* admin send queue commands */
enum i40e_status_code i40e_aq_get_firmware_version(struct i40e_hw *hw,
@@ -133,6 +136,7 @@ enum i40e_status_code i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_mac_config(struct i40e_hw *hw,
u16 max_frame_size, bool crc_en, u16 pacing,
+ bool auto_drop_blocking_packets,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_local_advt_reg(struct i40e_hw *hw,
u64 *advt_reg,
@@ -276,26 +280,18 @@ enum i40e_status_code i40e_aq_set_lldp_mib(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_cfg_lldp_mib_change_event(struct i40e_hw *hw,
bool enable_update,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_add_lldp_tlv(struct i40e_hw *hw, u8 bridge_type,
- void *buff, u16 buff_size, u16 tlv_len,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_update_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 old_len, u16 new_len, u16 offset,
- u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_delete_lldp_tlv(struct i40e_hw *hw,
- u8 bridge_type, void *buff, u16 buff_size,
- u16 tlv_len, u16 *mib_len,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_restore_lldp(struct i40e_hw *hw, u8 *setting, bool restore,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_stop_lldp(struct i40e_hw *hw, bool shutdown_agent,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_dcb_parameters(struct i40e_hw *hw,
bool dcb_enable,
struct i40e_asq_cmd_details
*cmd_details);
enum i40e_status_code i40e_aq_start_lldp(struct i40e_hw *hw,
+ bool persist,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_cee_dcb_config(struct i40e_hw *hw,
void *buff, u16 buff_size,
@@ -394,17 +390,27 @@ enum i40e_status_code i40e_aq_query_switch_comp_bw_config(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_resume_port_tx(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_add_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_add_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters(struct i40e_hw *hw, u16 vsi,
+ struct i40e_aqc_cloud_filters_element_data *filters,
+ u8 filter_count);
+enum i40e_status_code
+i40e_aq_rem_cloud_filters_bb(struct i40e_hw *hw, u16 seid,
+ struct i40e_aqc_cloud_filters_element_bb *filters,
+ u8 filter_count);
enum i40e_status_code i40e_read_lldp_cfg(struct i40e_hw *hw,
struct i40e_lldp_variables *lldp_cfg);
-enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
- u16 vsi,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count);
-
-enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
- u16 vsi,
- struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
- u8 filter_count);
+enum i40e_status_code i40e_aq_replace_cloud_filters(struct i40e_hw *hw,
+ struct i40e_aqc_replace_cloud_filters_cmd *filters,
+ struct i40e_aqc_replace_cloud_filters_cmd_buf *cmd_buf);
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -446,6 +452,9 @@ enum i40e_status_code i40e_acquire_nvm(struct i40e_hw *hw,
void i40e_release_nvm(struct i40e_hw *hw);
enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
u16 *data);
+enum i40e_status_code
+i40e_read_nvm_module_data(struct i40e_hw *hw, u8 module_ptr, u16 module_offset,
+ u16 data_offset, u16 words_data_size, u16 *data_ptr);
enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
u16 *words, u16 *data);
enum i40e_status_code i40e_write_nvm_aq(struct i40e_hw *hw, u8 module,
@@ -548,14 +557,24 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
-enum i40e_status_code i40e_aq_set_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 reg_val,
- struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_phy_register(struct i40e_hw *hw,
- u8 phy_select, u8 dev_addr,
- u32 reg_addr, u32 *reg_val,
- struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_set_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code
+i40e_aq_get_phy_register_ext(struct i40e_hw *hw,
+ u8 phy_select, u8 dev_addr, bool page_change,
+ bool set_mdio, u8 mdio_num,
+ u32 reg_addr, u32 *reg_val,
+ struct i40e_asq_cmd_details *cmd_details);
+
+/* Convenience wrappers for most common use case */
+#define i40e_aq_set_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_set_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd)
+#define i40e_aq_get_phy_register(hw, ps, da, pc, ra, rv, cd) \
+ i40e_aq_get_phy_register_ext(hw, ps, da, pc, FALSE, 0, ra, rv, cd)
enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
struct i40e_aqc_arp_proxy_data *proxy_config,
@@ -587,4 +606,6 @@ enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw,
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */
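A short usage sketch for the convenience wrappers above; the call shape mirrors the ixl_pf_i2c.c call sites later in this diff, and byte_offset is a placeholder:

    u8 dev_addr = I40E_I2C_EEPROM_DEV_ADDR;  /* 0xA0, from i40e_type.h */
    u8 byte_offset = 0;                      /* placeholder */
    u32 reg = 0;
    enum i40e_status_code status;

    /* Default MDIO I/F; FALSE keeps the module's current QSFP page */
    status = i40e_aq_get_phy_register(hw,
        I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
        dev_addr, FALSE, byte_offset, &reg, NULL);

Because the wrappers hard-code set_mdio = FALSE and mdio_num = 0, converting an existing caller only requires inserting the new page_change argument.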
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index cc2598302d02..6c57d0a25f7e 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -90,7 +90,7 @@
#define I40E_PF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_PF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_PF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_PF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_PF_ARQT 0x00080480 /* Reset: EMPR */
#define I40E_PF_ARQT_ARQT_SHIFT 0
#define I40E_PF_ARQT_ARQT_MASK I40E_MASK(0x3FF, I40E_PF_ARQT_ARQT_SHIFT)
@@ -113,7 +113,7 @@
#define I40E_PF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_PF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_PF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_PF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_PF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_PF_ATQT 0x00080400 /* Reset: EMPR */
#define I40E_PF_ATQT_ATQT_SHIFT 0
#define I40E_PF_ATQT_ATQT_MASK I40E_MASK(0x3FF, I40E_PF_ATQT_ATQT_SHIFT)
@@ -140,7 +140,7 @@
#define I40E_VF_ARQLEN_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN_ARQENABLE_SHIFT)
#define I40E_VF_ARQT(_VF) (0x00082C00 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ARQT_MAX_INDEX 127
#define I40E_VF_ARQT_ARQT_SHIFT 0
@@ -168,7 +168,7 @@
#define I40E_VF_ATQLEN_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN_ATQENABLE_SHIFT)
#define I40E_VF_ATQT(_VF) (0x00082800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
#define I40E_VF_ATQT_MAX_INDEX 127
#define I40E_VF_ATQT_ATQT_SHIFT 0
@@ -291,7 +291,7 @@
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT 30
#define I40E_PRTDCB_RETSTCC_UPINTC_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_UPINTC_MODE_SHIFT)
#define I40E_PRTDCB_RETSTCC_ETSTC_SHIFT 31
-#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
+#define I40E_PRTDCB_RETSTCC_ETSTC_MASK I40E_MASK(0x1u, I40E_PRTDCB_RETSTCC_ETSTC_SHIFT)
#define I40E_PRTDCB_RPPMC 0x001223A0 /* Reset: CORER */
#define I40E_PRTDCB_RPPMC_LANRPPM_SHIFT 0
#define I40E_PRTDCB_RPPMC_LANRPPM_MASK I40E_MASK(0xFF, I40E_PRTDCB_RPPMC_LANRPPM_SHIFT)
@@ -395,6 +395,20 @@
#define I40E_GL_FWSTS_FWRI_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWRI_SHIFT)
#define I40E_GL_FWSTS_FWS1B_SHIFT 16
#define I40E_GL_FWSTS_FWS1B_MASK I40E_MASK(0xFF, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_0 I40E_MASK(0x20, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_GL_FWSTS_FWS1B_EMPR_10 I40E_MASK(0x2A, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \
+ I40E_MASK(0x30, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \
+ I40E_MASK(0x31, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_TRANSITION_MASK \
+ I40E_MASK(0x32, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK \
+ I40E_MASK(0x33, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK \
+ I40E_MASK(0xB, I40E_GL_FWSTS_FWS1B_SHIFT)
+#define I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK \
+ I40E_MASK(0xC, I40E_GL_FWSTS_FWS1B_SHIFT)
#define I40E_GLGEN_CLKSTAT 0x000B8184 /* Reset: POR */
#define I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT 0
#define I40E_GLGEN_CLKSTAT_CLKMODE_MASK I40E_MASK(0x1, I40E_GLGEN_CLKSTAT_CLKMODE_SHIFT)
@@ -535,7 +549,7 @@
#define I40E_GLGEN_MSCA_MDICMD_SHIFT 30
#define I40E_GLGEN_MSCA_MDICMD_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDICMD_SHIFT)
#define I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT 31
-#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
+#define I40E_GLGEN_MSCA_MDIINPROGEN_MASK I40E_MASK(0x1u, I40E_GLGEN_MSCA_MDIINPROGEN_SHIFT)
#define I40E_GLGEN_MSRWD(_i) (0x0008819C + ((_i) * 4)) /* _i=0...3 */ /* Reset: POR */
#define I40E_GLGEN_MSRWD_MAX_INDEX 3
#define I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT 0
@@ -1274,14 +1288,14 @@
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT 30
#define I40E_GLLAN_TXPRE_QDIS_SET_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_SET_QDIS_SHIFT)
#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT 31
-#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
+#define I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_MASK I40E_MASK(0x1u, I40E_GLLAN_TXPRE_QDIS_CLEAR_QDIS_SHIFT)
#define I40E_PFLAN_QALLOC 0x001C0400 /* Reset: CORER */
#define I40E_PFLAN_QALLOC_FIRSTQ_SHIFT 0
#define I40E_PFLAN_QALLOC_FIRSTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_FIRSTQ_SHIFT)
#define I40E_PFLAN_QALLOC_LASTQ_SHIFT 16
#define I40E_PFLAN_QALLOC_LASTQ_MASK I40E_MASK(0x7FF, I40E_PFLAN_QALLOC_LASTQ_SHIFT)
#define I40E_PFLAN_QALLOC_VALID_SHIFT 31
-#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1, I40E_PFLAN_QALLOC_VALID_SHIFT)
+#define I40E_PFLAN_QALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PFLAN_QALLOC_VALID_SHIFT)
#define I40E_QRX_ENA(_Q) (0x00120000 + ((_Q) * 4)) /* _i=0...1535 */ /* Reset: PFR */
#define I40E_QRX_ENA_MAX_INDEX 1535
#define I40E_QRX_ENA_QENA_REQ_SHIFT 0
@@ -1690,7 +1704,7 @@
#define I40E_GLNVM_SRCTL_START_SHIFT 30
#define I40E_GLNVM_SRCTL_START_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_START_SHIFT)
#define I40E_GLNVM_SRCTL_DONE_SHIFT 31
-#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_SRCTL_DONE_SHIFT)
+#define I40E_GLNVM_SRCTL_DONE_MASK I40E_MASK(0x1u, I40E_GLNVM_SRCTL_DONE_SHIFT)
#define I40E_GLNVM_SRDATA 0x000B6114 /* Reset: POR */
#define I40E_GLNVM_SRDATA_WRDATA_SHIFT 0
#define I40E_GLNVM_SRDATA_WRDATA_MASK I40E_MASK(0xFFFF, I40E_GLNVM_SRDATA_WRDATA_SHIFT)
@@ -3057,7 +3071,7 @@
#define I40E_PF_VT_PFALLOC_LASTVF_SHIFT 8
#define I40E_PF_VT_PFALLOC_LASTVF_MASK I40E_MASK(0xFF, I40E_PF_VT_PFALLOC_LASTVF_SHIFT)
#define I40E_PF_VT_PFALLOC_VALID_SHIFT 31
-#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1, I40E_PF_VT_PFALLOC_VALID_SHIFT)
+#define I40E_PF_VT_PFALLOC_VALID_MASK I40E_MASK(0x1u, I40E_PF_VT_PFALLOC_VALID_SHIFT)
#define I40E_VP_MDET_RX(_VF) (0x0012A000 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: CORER */
#define I40E_VP_MDET_RX_MAX_INDEX 127
#define I40E_VP_MDET_RX_VALID_SHIFT 0
@@ -3193,7 +3207,7 @@
#define I40E_VF_ARQLEN1_ARQCRIT_SHIFT 30
#define I40E_VF_ARQLEN1_ARQCRIT_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQCRIT_SHIFT)
#define I40E_VF_ARQLEN1_ARQENABLE_SHIFT 31
-#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
+#define I40E_VF_ARQLEN1_ARQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ARQLEN1_ARQENABLE_SHIFT)
#define I40E_VF_ARQT1 0x00007000 /* Reset: EMPR */
#define I40E_VF_ARQT1_ARQT_SHIFT 0
#define I40E_VF_ARQT1_ARQT_MASK I40E_MASK(0x3FF, I40E_VF_ARQT1_ARQT_SHIFT)
@@ -3216,7 +3230,7 @@
#define I40E_VF_ATQLEN1_ATQCRIT_SHIFT 30
#define I40E_VF_ATQLEN1_ATQCRIT_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQCRIT_SHIFT)
#define I40E_VF_ATQLEN1_ATQENABLE_SHIFT 31
-#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
+#define I40E_VF_ATQLEN1_ATQENABLE_MASK I40E_MASK(0x1u, I40E_VF_ATQLEN1_ATQENABLE_SHIFT)
#define I40E_VF_ATQT1 0x00008400 /* Reset: EMPR */
#define I40E_VF_ATQT1_ATQT_SHIFT 0
#define I40E_VF_ATQT1_ATQT_MASK I40E_MASK(0x3FF, I40E_VF_ATQT1_ATQT_SHIFT)
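The 0x1 -> 0x1u changes sprinkled through this header are not cosmetic. I40E_MASK() expands to a left shift of its first argument, and for the *_SHIFT 31 masks a plain int literal shifts a signed value past INT_MAX, which is undefined behavior in C. A standalone illustration:

    u32 bad = 0x1 << 31;    /* undefined behavior: signed overflow */
    u32 ok  = 0x1u << 31;   /* well defined: yields 0x80000000     */

Only the bit-31 masks need the suffix; all smaller shifts stay inside the positive range of a 32-bit int.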
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index b640a444de76..cda0cd413d82 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -56,7 +56,7 @@
#define I40E_MAX_PF_VSI 64
#define I40E_MAX_PF_QP 128
#define I40E_MAX_VSI_QP 16
-#define I40E_MAX_VF_VSI 3
+#define I40E_MAX_VF_VSI 4
#define I40E_MAX_CHAINED_RX_BUFFERS 5
#define I40E_MAX_PF_UDP_OFFLOAD_PORTS 16
@@ -95,8 +95,8 @@ typedef void (*I40E_ADMINQ_CALLBACK)(struct i40e_hw *, struct i40e_aq_desc *);
#define I40E_HI_BYTE(x) ((u8)(((x) >> 8) & 0xFF))
#define I40E_LO_BYTE(x) ((u8)((x) & 0xFF))
-/* Number of Transmit Descriptors must be a multiple of 8. */
-#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 8
+/* Number of Transmit Descriptors must be a multiple of 32. */
+#define I40E_REQ_TX_DESCRIPTOR_MULTIPLE 32
/* Number of Receive Descriptors must be a multiple of 32 if
* the number of descriptors is greater than 32.
*/
@@ -126,6 +126,8 @@ enum i40e_debug_mask {
I40E_DEBUG_DIAG = 0x00000800,
I40E_DEBUG_FD = 0x00001000,
+ I40E_DEBUG_IWARP = 0x00F00000,
+
I40E_DEBUG_AQ_MESSAGE = 0x01000000,
I40E_DEBUG_AQ_DESCRIPTOR = 0x02000000,
I40E_DEBUG_AQ_DESC_BUFFER = 0x04000000,
@@ -188,7 +190,6 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
-
/* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the
@@ -242,6 +243,7 @@ enum i40e_vsi_type {
I40E_VSI_MIRROR = 5,
I40E_VSI_SRIOV = 6,
I40E_VSI_FDIR = 7,
+ I40E_VSI_IWARP = 8,
I40E_VSI_TYPE_UNKNOWN
};
@@ -373,6 +375,7 @@ struct i40e_hw_capabilities {
#define I40E_CLOUD_FILTER_MODE1 0x6
#define I40E_CLOUD_FILTER_MODE2 0x7
#define I40E_CLOUD_FILTER_MODE3 0x8
+#define I40E_SWITCH_MODE_MASK 0xF
u32 management_mode;
u32 mng_protocols_over_mctp;
@@ -487,6 +490,7 @@ enum i40e_nvmupd_cmd {
I40E_NVMUPD_EXEC_AQ,
I40E_NVMUPD_GET_AQ_RESULT,
I40E_NVMUPD_GET_AQ_EVENT,
+ I40E_NVMUPD_FEATURES,
};
enum i40e_nvmupd_state {
@@ -522,6 +526,10 @@ enum i40e_nvmupd_state {
#define I40E_NVM_AQE 0xe
#define I40E_NVM_EXEC 0xf
+#define I40E_NVM_EXEC_GET_AQ_RESULT 0x0
+#define I40E_NVM_EXEC_FEATURES 0xe
+#define I40E_NVM_EXEC_STATUS 0xf
+
#define I40E_NVM_ADAPT_SHIFT 16
#define I40E_NVM_ADAPT_MASK (0xffffULL << I40E_NVM_ADAPT_SHIFT)
@@ -536,6 +544,20 @@ struct i40e_nvm_access {
u8 data[1];
};
+/* NVMUpdate features API */
+#define I40E_NVMUPD_FEATURES_API_VER_MAJOR 0
+#define I40E_NVMUPD_FEATURES_API_VER_MINOR 14
+#define I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN 12
+
+#define I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT BIT(0)
+
+struct i40e_nvmupd_features {
+ u8 major;
+ u8 minor;
+ u16 size;
+ u8 features[I40E_NVMUPD_FEATURES_API_FEATURES_ARRAY_LEN];
+};
+
/* (Q)SFP module access definitions */
#define I40E_I2C_EEPROM_DEV_ADDR 0xA0
#define I40E_I2C_EEPROM_DEV_ADDR2 0xA2
@@ -727,6 +749,11 @@ struct i40e_hw {
#define I40E_HW_FLAG_802_1AD_CAPABLE BIT_ULL(1)
#define I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE BIT_ULL(2)
#define I40E_HW_FLAG_NVM_READ_REQUIRES_LOCK BIT_ULL(3)
+#define I40E_HW_FLAG_FW_LLDP_STOPPABLE BIT_ULL(4)
+#define I40E_HW_FLAG_FW_LLDP_PERSISTENT BIT_ULL(5)
+#define I40E_HW_FLAG_AQ_PHY_ACCESS_EXTENDED BIT_ULL(6)
+#define I40E_HW_FLAG_DROP_MODE BIT_ULL(7)
+#define I40E_HW_FLAG_X722_FEC_REQUEST_CAPABLE BIT_ULL(8)
u64 flags;
/* Used in set switch config AQ command */
@@ -734,6 +761,9 @@ struct i40e_hw {
u16 first_tag;
u16 second_tag;
+ /* NVMUpdate features */
+ struct i40e_nvmupd_features nvmupd_features;
+
/* debug mask */
u32 debug_mask;
char err_str[16];
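A hedged consumer-side sketch of the new NVMUpdate features block, assuming the driver has filled in hw->nvmupd_features during adminq init (that code is not shown in this excerpt):

    /* Does the driver advertise flat (module-less) NVM access? */
    if (hw->nvmupd_features.features[0] &
        I40E_NVMUPD_FEATURE_FLAT_NVM_SUPPORT) {
        /* flat NVM reads/writes may be issued */
    }

The I40E_NVMUPD_FEATURES path added in i40e_nvm.c copies this structure out verbatim and zero-fills the remainder when the caller's buffer is larger, so a newer tool querying an older driver sees deterministic padding.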
diff --git a/sys/dev/ixl/if_iavf.c b/sys/dev/ixl/if_iavf.c
index ce79f2e6a234..3abafc8fe78f 100644
--- a/sys/dev/ixl/if_iavf.c
+++ b/sys/dev/ixl/if_iavf.c
@@ -2124,7 +2124,7 @@ iavf_add_device_sysctls(struct iavf_sc *sc)
/* Add stats sysctls */
ixl_add_vsi_sysctls(dev, vsi, ctx, "vsi");
- ixl_add_queues_sysctls(dev, vsi);
+ ixl_vsi_add_queues_stats(vsi, ctx);
}
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 27e9d9908455..7d4c696024be 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -48,7 +48,7 @@
* Driver version
*********************************************************************/
#define IXL_DRIVER_VERSION_MAJOR 2
-#define IXL_DRIVER_VERSION_MINOR 1
+#define IXL_DRIVER_VERSION_MINOR 2
#define IXL_DRIVER_VERSION_BUILD 0
#define IXL_DRIVER_VERSION_STRING \
@@ -126,6 +126,8 @@ static void ixl_if_vflr_handle(if_ctx_t ctx);
static u_int ixl_mc_filter_apply(void *, struct sockaddr_dl *, u_int);
static void ixl_save_pf_tunables(struct ixl_pf *);
static int ixl_allocate_pci_resources(struct ixl_pf *);
+static void ixl_setup_ssctx(struct ixl_pf *pf);
+static void ixl_admin_timer(void *arg);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -211,6 +213,7 @@ static driver_t ixl_if_driver = {
static SYSCTL_NODE(_hw, OID_AUTO, ixl, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
"ixl driver parameters");
+#ifdef IXL_DEBUG_FC
/*
* Leave this on unless you need to send flow control
* frames (or other control frames) from software
@@ -221,6 +224,16 @@ TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
&ixl_enable_tx_fc_filter, 0,
"Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
+#endif
+
+#ifdef IXL_DEBUG
+static int ixl_debug_recovery_mode = 0;
+TUNABLE_INT("hw.ixl.debug_recovery_mode",
+ &ixl_debug_recovery_mode);
+SYSCTL_INT(_hw_ixl, OID_AUTO, debug_recovery_mode, CTLFLAG_RDTUN,
+ &ixl_debug_recovery_mode, 0,
+ "Act like when FW entered recovery mode (for debuging)");
+#endif
static int ixl_i2c_access_method = 0;
TUNABLE_INT("hw.ixl.i2c_access_method",
@@ -355,7 +368,7 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
rid = PCIR_BAR(0);
pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
&rid, RF_ACTIVE);
-
+
if (!(pf->pci_mem)) {
device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
return (ENXIO);
@@ -384,9 +397,79 @@ ixl_allocate_pci_resources(struct ixl_pf *pf)
pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
pf->hw.back = &pf->osdep;
-
+
return (0);
- }
+}
+
+static void
+ixl_setup_ssctx(struct ixl_pf *pf)
+{
+ if_softc_ctx_t scctx = pf->vsi.shared;
+ struct i40e_hw *hw = &pf->hw;
+
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 1;
+ scctx->isc_ntxqsets = scctx->isc_nrxqsets = 1;
+ } else if (hw->mac.type == I40E_MAC_X722)
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
+ else
+ scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
+
+ if (pf->vsi.enable_head_writeback) {
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
+ scctx->isc_txrx = &ixl_txrx_hwb;
+ } else {
+ scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
+ * sizeof(struct i40e_tx_desc), DBA_ALIGN);
+ scctx->isc_txrx = &ixl_txrx_dwb;
+ }
+
+ scctx->isc_txrx->ift_legacy_intr = ixl_intr;
+ scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
+ * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
+ scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
+ scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
+ scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
+ scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
+ scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
+ scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
+ scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
+ scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
+}
+
+static void
+ixl_admin_timer(void *arg)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg;
+
+ /* Fire off the admin task */
+ iflib_admin_intr_deferred(pf->vsi.ctx);
+
+ /* Reschedule the admin timer */
+ callout_schedule(&pf->admin_timer, hz/2);
+}
+
+static int
+ixl_attach_pre_recovery_mode(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+
+ device_printf(dev, "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+
+ i40e_get_mac_addr(hw, hw->mac.addr);
+
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
+ ixl_setup_ssctx(pf);
+
+ return (0);
+}
static int
ixl_if_attach_pre(if_ctx_t ctx)
@@ -395,7 +478,7 @@ ixl_if_attach_pre(if_ctx_t ctx)
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
- if_softc_ctx_t scctx;
+ enum i40e_get_fw_lldp_status_resp lldp_status;
struct i40e_filter_control_settings filter;
enum i40e_status_code status;
int error = 0;
@@ -416,7 +499,12 @@ ixl_if_attach_pre(if_ctx_t ctx)
vsi->num_vlans = 0;
vsi->ctx = ctx;
vsi->media = iflib_get_media(ctx);
- vsi->shared = scctx = iflib_get_softc_ctx(ctx);
+ vsi->shared = iflib_get_softc_ctx(ctx);
+
+ snprintf(pf->admin_mtx_name, sizeof(pf->admin_mtx_name),
+ "%s:admin", device_get_nameunit(dev));
+ mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
+ callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);
/* Save tunable values */
ixl_save_pf_tunables(pf);
@@ -430,13 +518,11 @@ ixl_if_attach_pre(if_ctx_t ctx)
/* Establish a clean starting point */
i40e_clear_hw(hw);
- status = i40e_pf_reset(hw);
- if (status) {
- device_printf(dev, "PF reset failure %s\n",
- i40e_stat_str(hw, status));
- error = EIO;
+ i40e_set_mac_type(hw);
+
+ error = ixl_pf_reset(pf);
+ if (error)
goto err_out;
- }
/* Initialize the shared code */
status = i40e_init_shared_code(hw);
@@ -483,6 +569,13 @@ ixl_if_attach_pre(if_ctx_t ctx)
device_printf(dev, "Please update the NVM image.\n");
}
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ error = ixl_attach_pre_recovery_mode(pf);
+ if (error)
+ goto err_out;
+ return (error);
+ }
+
/* Clear PXE mode */
i40e_clear_pxe_mode(hw);
@@ -495,24 +588,14 @@ ixl_if_attach_pre(if_ctx_t ctx)
}
/* Set up host memory cache */
- status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (status) {
- device_printf(dev, "init_lan_hmc failed: %s\n",
- i40e_stat_str(hw, status));
- goto err_get_cap;
- }
- status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (status) {
- device_printf(dev, "configure_lan_hmc failed: %s\n",
- i40e_stat_str(hw, status));
+ error = ixl_setup_hmc(pf);
+ if (error)
goto err_mac_hmc;
- }
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4)) {
- i40e_aq_stop_lldp(hw, TRUE, NULL);
+ i40e_aq_stop_lldp(hw, true, false, NULL);
pf->state |= IXL_PF_STATE_FW_LLDP_DISABLED;
}
@@ -537,46 +620,36 @@ ixl_if_attach_pre(if_ctx_t ctx)
device_printf(dev, "i40e_set_filter_control() failed\n");
/* Query device FW LLDP status */
- ixl_get_fw_lldp_status(pf);
+ if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
+ if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
+ atomic_set_32(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else {
+ atomic_clear_32(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ }
+ }
+
/* Tell FW to apply DCB config on link up */
i40e_aq_set_dcb_parameters(hw, true, NULL);
/* Fill out iflib parameters */
- if (hw->mac.type == I40E_MAC_X722)
- scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 128;
- else
- scctx->isc_ntxqsets_max = scctx->isc_nrxqsets_max = 64;
- if (vsi->enable_head_writeback) {
- scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
- * sizeof(struct i40e_tx_desc) + sizeof(u32), DBA_ALIGN);
- scctx->isc_txrx = &ixl_txrx_hwb;
- } else {
- scctx->isc_txqsizes[0] = roundup2(scctx->isc_ntxd[0]
- * sizeof(struct i40e_tx_desc), DBA_ALIGN);
- scctx->isc_txrx = &ixl_txrx_dwb;
- }
- scctx->isc_txrx->ift_legacy_intr = ixl_intr;
- scctx->isc_rxqsizes[0] = roundup2(scctx->isc_nrxd[0]
- * sizeof(union i40e_32byte_rx_desc), DBA_ALIGN);
- scctx->isc_msix_bar = PCIR_BAR(IXL_MSIX_BAR);
- scctx->isc_tx_nsegments = IXL_MAX_TX_SEGS;
- scctx->isc_tx_tso_segments_max = IXL_MAX_TSO_SEGS;
- scctx->isc_tx_tso_size_max = IXL_TSO_SIZE;
- scctx->isc_tx_tso_segsize_max = IXL_MAX_DMA_SEG_SIZE;
- scctx->isc_rss_table_size = pf->hw.func_caps.rss_table_size;
- scctx->isc_tx_csum_flags = CSUM_OFFLOAD;
- scctx->isc_capabilities = scctx->isc_capenable = IXL_CAPS;
+ ixl_setup_ssctx(pf);
INIT_DBG_DEV(dev, "end");
return (0);
err_mac_hmc:
- i40e_shutdown_lan_hmc(hw);
+ ixl_shutdown_hmc(pf);
err_get_cap:
i40e_shutdown_adminq(hw);
err_out:
ixl_free_pci_resources(pf);
err_pci_res:
+ mtx_lock(&pf->admin_mtx);
+ callout_stop(&pf->admin_timer);
+ mtx_unlock(&pf->admin_mtx);
+ mtx_destroy(&pf->admin_mtx);
return (error);
}
@@ -610,6 +683,22 @@ ixl_if_attach_post(if_ctx_t ctx)
goto err;
}
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ /* Keep admin queue interrupts active while driver is loaded */
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
+ ixl_add_sysctls_recovery_mode(pf);
+
+ /* Start the admin timer */
+ mtx_lock(&pf->admin_mtx);
+ callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
+ mtx_unlock(&pf->admin_mtx);
+ return (0);
+ }
+
/* Determine link state */
if (ixl_attach_get_link_status(pf)) {
error = EINVAL;
@@ -700,6 +789,10 @@ ixl_if_attach_post(if_ctx_t ctx)
device_printf(dev, "The device is not iWARP enabled\n");
}
#endif
+ /* Start the admin timer */
+ mtx_lock(&pf->admin_mtx);
+ callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
+ mtx_unlock(&pf->admin_mtx);
INIT_DBG_DEV(dev, "end");
return (0);
@@ -728,6 +821,12 @@ ixl_if_detach(if_ctx_t ctx)
INIT_DBG_DEV(dev, "begin");
+ /* Stop the admin timer */
+ mtx_lock(&pf->admin_mtx);
+ callout_stop(&pf->admin_timer);
+ mtx_unlock(&pf->admin_mtx);
+ mtx_destroy(&pf->admin_mtx);
+
#ifdef IXL_IW
if (ixl_enable_iwarp && pf->iw_enabled) {
error = ixl_iw_pf_detach(pf);
@@ -741,13 +840,7 @@ ixl_if_detach(if_ctx_t ctx)
ifmedia_removeall(vsi->media);
/* Shutdown LAN HMC */
- if (hw->hmc.hmc_obj) {
- status = i40e_shutdown_lan_hmc(hw);
- if (status)
- device_printf(dev,
- "i40e_shutdown_lan_hmc() failed with status %s\n",
- i40e_stat_str(hw, status));
- }
+ ixl_shutdown_hmc(pf);
/* Shutdown admin queue */
ixl_disable_intr0(hw);
@@ -819,6 +912,8 @@ ixl_if_init(if_ctx_t ctx)
u8 tmpaddr[ETHER_ADDR_LEN];
int ret;
+ if (IXL_PF_IN_RECOVERY_MODE(pf))
+ return;
/*
* If the aq is dead here, it probably means something outside of the driver
* did something to the adapter, like a PF reset.
@@ -827,7 +922,7 @@ ixl_if_init(if_ctx_t ctx)
if (!i40e_check_asq_alive(&pf->hw)) {
device_printf(dev, "Admin Queue is down; resetting...\n");
ixl_teardown_hw_structs(pf);
- ixl_rebuild_hw_structs_after_reset(pf);
+ ixl_rebuild_hw_structs_after_reset(pf, false);
}
/* Get the latest mac address... User might use a LAA */
@@ -853,7 +948,7 @@ ixl_if_init(if_ctx_t ctx)
device_printf(dev, "initialize vsi failed!!\n");
return;
}
-
+
/* Reconfigure multicast filters in HW */
ixl_if_multi_set(ctx);
@@ -900,6 +995,9 @@ ixl_if_stop(if_ctx_t ctx)
INIT_DEBUGOUT("ixl_if_stop: begin\n");
+ if (IXL_PF_IN_RECOVERY_MODE(pf))
+ return;
+
// TODO: This may need to be reworked
#ifdef IXL_IW
/* Stop iWARP device */
@@ -1065,7 +1163,7 @@ ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxq
device_printf(iflib_get_dev(ctx), "Unable to allocate TX ring memory\n");
return (ENOMEM);
}
-
+
for (i = 0, que = vsi->tx_queues; i < ntxqsets; i++, que++) {
struct tx_ring *txr = &que->txr;
@@ -1089,7 +1187,7 @@ ixl_if_tx_queues_alloc(if_ctx_t ctx, caddr_t *vaddrs, uint64_t *paddrs, int ntxq
txr->tx_paddr = paddrs[i * ntxqs];
txr->que = que;
}
-
+
return (0);
fail:
ixl_if_queues_free(ctx);
@@ -1166,6 +1264,9 @@ ixl_if_queues_free(if_ctx_t ctx)
free(vsi->rx_queues, M_IXL);
vsi->rx_queues = NULL;
}
+
+ if (!IXL_PF_IN_RECOVERY_MODE(pf))
+ sysctl_ctx_free(&vsi->sysctl_ctx);
}
void
@@ -1175,7 +1276,7 @@ ixl_update_link_status(struct ixl_pf *pf)
struct i40e_hw *hw = &pf->hw;
u64 baudrate;
- if (pf->link_up) {
+ if (pf->link_up) {
if (vsi->link_active == FALSE) {
vsi->link_active = TRUE;
baudrate = ixl_max_aq_speed_to_value(hw->phy.link_info.link_speed);
@@ -1184,7 +1285,6 @@ ixl_update_link_status(struct ixl_pf *pf)
#ifdef PCI_IOV
ixl_broadcast_link_state(pf);
#endif
-
}
} else { /* Link down */
if (vsi->link_active == TRUE) {
@@ -1271,20 +1371,27 @@ ixl_process_adminq(struct ixl_pf *pf, u16 *pending)
static void
ixl_if_update_admin_status(if_ctx_t ctx)
{
- struct ixl_pf *pf = iflib_get_softc(ctx);
- struct i40e_hw *hw = &pf->hw;
- u16 pending;
+ struct ixl_pf *pf = iflib_get_softc(ctx);
+ struct i40e_hw *hw = &pf->hw;
+ u16 pending;
if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
ixl_handle_empr_reset(pf);
+ /*
+ * Admin Queue is shut down while handling reset.
+ * Don't proceed if it hasn't been re-initialized
+ * e.g. due to an issue with new FW.
+ */
+ if (!i40e_check_asq_alive(&pf->hw))
+ return;
+
if (pf->state & IXL_PF_STATE_MDD_PENDING)
ixl_handle_mdd_event(pf);
ixl_process_adminq(pf, &pending);
ixl_update_link_status(pf);
- ixl_update_stats_counters(pf);
-
+
/*
* If there are still messages to process, reschedule ourselves.
* Otherwise, re-enable our interrupt and go to sleep.
@@ -1522,11 +1629,12 @@ ixl_if_promisc_set(if_ctx_t ctx, int flags)
static void
ixl_if_timer(if_ctx_t ctx, uint16_t qid)
{
+ struct ixl_pf *pf = iflib_get_softc(ctx);
+
if (qid != 0)
return;
- /* Fire off the adminq task */
- iflib_admin_intr_deferred(ctx);
+ ixl_update_stats_counters(pf);
}
static void
@@ -1690,7 +1798,12 @@ ixl_save_pf_tunables(struct ixl_pf *pf)
device_t dev = pf->dev;
/* Save tunable information */
+#ifdef IXL_DEBUG_FC
pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
+#endif
+#ifdef IXL_DEBUG
+ pf->recovery_mode = ixl_debug_recovery_mode;
+#endif
pf->dbg_mask = ixl_core_debug_mask;
pf->hw.debug_mask = ixl_shared_debug_mask;
pf->vsi.enable_head_writeback = !!(ixl_enable_head_writeback);
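Condensed, the admin-timer lifecycle these if_ixl.c hunks establish looks like this (a restatement of the pattern shown above, not additional code):

    /* attach_pre: create the mutex-backed callout */
    mtx_init(&pf->admin_mtx, pf->admin_mtx_name, NULL, MTX_DEF);
    callout_init_mtx(&pf->admin_timer, &pf->admin_mtx, 0);

    /* attach_post: arm it; ixl_admin_timer() then re-arms itself */
    mtx_lock(&pf->admin_mtx);
    callout_reset(&pf->admin_timer, hz/2, ixl_admin_timer, pf);
    mtx_unlock(&pf->admin_mtx);

    /* detach and attach error paths: disarm, then destroy */
    mtx_lock(&pf->admin_mtx);
    callout_stop(&pf->admin_timer);
    mtx_unlock(&pf->admin_mtx);
    mtx_destroy(&pf->admin_mtx);

This moves periodic admin work (adminq processing, link status) off ixl_if_timer(), which is reduced to updating statistics counters.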
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 41d682dadfea..65e92c470a6c 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -200,6 +200,15 @@
#define CSUM_OFFLOAD_IPV6 (CSUM_TCP_IPV6|CSUM_UDP_IPV6|CSUM_SCTP_IPV6)
#define CSUM_OFFLOAD (CSUM_OFFLOAD_IPV4|CSUM_OFFLOAD_IPV6|CSUM_TSO)
+/* Misc flags for ixl_vsi.flags */
+#define IXL_FLAGS_KEEP_TSO4 (1 << 0)
+#define IXL_FLAGS_KEEP_TSO6 (1 << 1)
+#define IXL_FLAGS_USES_MSIX (1 << 2)
+#define IXL_FLAGS_IS_VF (1 << 3)
+
+#define IXL_VSI_IS_PF(v) ((v->flags & IXL_FLAGS_IS_VF) == 0)
+#define IXL_VSI_IS_VF(v) ((v->flags & IXL_FLAGS_IS_VF) != 0)
+
#define IXL_VF_RESET_TIMEOUT 100
#define IXL_VSI_DATA_PORT 0x01
@@ -292,7 +301,7 @@
#endif
/* For stats sysctl naming */
-#define QUEUE_NAME_LEN 32
+#define IXL_QUEUE_NAME_LEN 32
#define IXL_DEV_ERR(_dev, _format, ...) \
device_printf(_dev, "%s: " _format " (%s:%d)\n", __func__, ##__VA_ARGS__, __FILE__, __LINE__)
@@ -438,6 +447,7 @@ struct ixl_vsi {
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
+ u64 num_hw_filters;
/* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
@@ -447,7 +457,7 @@ struct ixl_vsi {
/* Per-VSI stats from hardware */
struct i40e_eth_stats eth_stats;
struct i40e_eth_stats eth_stats_offsets;
- bool stat_offsets_loaded;
+ bool stat_offsets_loaded;
/* VSI stat counters */
u64 ipackets;
u64 ierrors;
@@ -461,14 +471,11 @@ struct ixl_vsi {
u64 oqdrops;
u64 noproto;
- /* Driver statistics */
- u64 hw_filters_del;
- u64 hw_filters_add;
-
/* Misc. */
- u64 flags;
+ u64 flags;
/* Stats sysctls for this VSI */
struct sysctl_oid *vsi_node;
+ struct sysctl_ctx_list sysctl_ctx;
};
/*
@@ -497,9 +504,9 @@ ixl_new_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
*/
static inline bool
cmp_etheraddr(const u8 *ea1, const u8 *ea2)
-{
- return (bcmp(ea1, ea2, 6) == 0);
-}
+{
+ return (bcmp(ea1, ea2, ETHER_ADDR_LEN) == 0);
+}
/*
* Return next largest power of 2, unsigned
@@ -548,5 +555,6 @@ void ixl_add_vsi_sysctls(device_t dev, struct ixl_vsi *vsi,
void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_eth_stats *eth_stats);
-void ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi);
+void ixl_vsi_add_queues_stats(struct ixl_vsi *vsi,
+ struct sysctl_ctx_list *ctx);
#endif /* _IXL_H_ */
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index b72277233e48..ff58b65897e2 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -36,6 +36,8 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
+#include "i40e_dcb.h"
+
#include "ixl.h"
#include "ixl_pf_qmgr.h"
@@ -59,19 +61,38 @@
I40E_VFINT_DYN_CTLN(((vector) - 1) + \
(((hw)->func_caps.num_msix_vectors_vf - 1) * (vf_num)))
+enum ixl_fw_mode {
+ IXL_FW_MODE_NORMAL,
+ IXL_FW_MODE_RECOVERY,
+ IXL_FW_MODE_UEMPR
+};
+
+enum ixl_i2c_access_method_t {
+ IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE = 0,
+ IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS = 1,
+ IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD = 2,
+ IXL_I2C_ACCESS_METHOD_AQ = 3,
+ IXL_I2C_ACCESS_METHOD_TYPE_LENGTH = 4
+};
+
/* Used in struct ixl_pf's state field */
enum ixl_pf_state {
- IXL_PF_STATE_ADAPTER_RESETTING = (1 << 0),
- IXL_PF_STATE_MDD_PENDING = (1 << 1),
- IXL_PF_STATE_PF_RESET_REQ = (1 << 2),
- IXL_PF_STATE_VF_RESET_REQ = (1 << 3),
- IXL_PF_STATE_PF_CRIT_ERR = (1 << 4),
- IXL_PF_STATE_CORE_RESET_REQ = (1 << 5),
- IXL_PF_STATE_GLOB_RESET_REQ = (1 << 6),
- IXL_PF_STATE_EMP_RESET_REQ = (1 << 7),
- IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 8),
+ IXL_PF_STATE_RECOVERY_MODE = (1 << 0),
+ IXL_PF_STATE_ADAPTER_RESETTING = (1 << 1),
+ IXL_PF_STATE_MDD_PENDING = (1 << 2),
+ IXL_PF_STATE_PF_RESET_REQ = (1 << 3),
+ IXL_PF_STATE_VF_RESET_REQ = (1 << 4),
+ IXL_PF_STATE_PF_CRIT_ERR = (1 << 5),
+ IXL_PF_STATE_CORE_RESET_REQ = (1 << 6),
+ IXL_PF_STATE_GLOB_RESET_REQ = (1 << 7),
+ IXL_PF_STATE_EMP_RESET_REQ = (1 << 8),
+ IXL_PF_STATE_FW_LLDP_DISABLED = (1 << 9),
};
+#define IXL_PF_IN_RECOVERY_MODE(pf) \
+ ((atomic_load_acq_32(&pf->state) & IXL_PF_STATE_RECOVERY_MODE) != 0)
+
+
struct ixl_vf {
struct ixl_vsi vsi;
u32 vf_flags;
@@ -79,10 +100,9 @@ struct ixl_vf {
u8 mac[ETHER_ADDR_LEN];
u16 vf_num;
- u32 version;
+ struct virtchnl_version_info version;
struct ixl_pf_qtag qtag;
- struct sysctl_ctx_list ctx;
};
/* Physical controller structure */
@@ -105,8 +125,17 @@ struct ixl_pf {
struct ixl_pf_qmgr qmgr;
struct ixl_pf_qtag qtag;
+ char admin_mtx_name[16]; /* name of the admin mutex */
+ struct mtx admin_mtx; /* mutex to protect the admin timer */
+ struct callout admin_timer; /* timer to trigger admin task */
+
/* Tunable values */
+#ifdef IXL_DEBUG_FC
bool enable_tx_fc_filter;
+#endif
+#ifdef IXL_DEBUG
+ bool recovery_mode;
+#endif
int dynamic_rx_itr;
int dynamic_tx_itr;
int tx_itr;
@@ -128,16 +157,17 @@ struct ixl_pf {
bool stat_offsets_loaded;
/* I2C access methods */
- u8 i2c_access_method;
- s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
+ enum ixl_i2c_access_method_t i2c_access_method;
+ s32 (*read_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 *data);
- s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
+ s32 (*write_i2c_byte)(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
/* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
+ int vc_debug_lvl;
};
/*
@@ -223,8 +253,6 @@ struct ixl_pf {
"\t1 - Enable (VEB)\n" \
"Enabling this will allow VFs in separate VMs to communicate over the hardware bridge."
-extern const char * const ixl_fc_string[6];
-
MALLOC_DECLARE(M_IXL);
/*** Functions / Macros ***/
@@ -239,15 +267,14 @@ MALLOC_DECLARE(M_IXL);
ixl_send_vf_nack_msg((pf), (vf), (op), (st), __FILE__, __LINE__)
/* Debug printing */
-#define ixl_dbg(pf, m, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, m, s, ##__VA_ARGS__)
-#define ixl_dbg_info(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__)
-#define ixl_dbg_filter(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__)
-#define ixl_dbg_iov(pf, s, ...) ixl_debug_core(pf->dev, pf->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__)
+#define ixl_dbg(pf, m, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, m, s, ##__VA_ARGS__)
+#define ixl_dbg_info(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_INFO, s, ##__VA_ARGS__)
+#define ixl_dbg_filter(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_FILTER, s, ##__VA_ARGS__)
+#define ixl_dbg_iov(pf, s, ...) ixl_debug_core((pf)->dev, (pf)->dbg_mask, IXL_DBG_IOV, s, ##__VA_ARGS__)
/* PF-only function declarations */
int ixl_setup_interface(device_t, struct ixl_pf *);
void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
-char * ixl_aq_speed_to_str(enum i40e_aq_link_speed);
void ixl_handle_que(void *context, int pending);
@@ -261,9 +288,7 @@ int ixl_msix_adminq(void *);
void ixl_do_adminq(void *, int);
int ixl_res_alloc_cmp(const void *, const void *);
-char * ixl_switch_res_type_string(u8);
-char * ixl_switch_element_string(struct sbuf *,
- struct i40e_aqc_switch_config_element_resp *);
+const char * ixl_switch_res_type_string(u8);
void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
struct sysctl_oid_list *, struct i40e_hw_port_stats *);
@@ -282,6 +307,7 @@ void ixl_stat_update32(struct i40e_hw *, u32, bool,
u64 *, u64 *);
void ixl_stop(struct ixl_pf *);
+void ixl_vsi_add_sysctls(struct ixl_vsi *, const char *, bool);
int ixl_get_hw_capabilities(struct ixl_pf *);
void ixl_link_up_msg(struct ixl_pf *);
void ixl_update_link_status(struct ixl_pf *);
@@ -289,19 +315,20 @@ int ixl_setup_stations(struct ixl_pf *);
int ixl_switch_config(struct ixl_pf *);
void ixl_stop_locked(struct ixl_pf *);
int ixl_teardown_hw_structs(struct ixl_pf *);
-int ixl_reset(struct ixl_pf *);
void ixl_init_locked(struct ixl_pf *);
void ixl_set_rss_key(struct ixl_pf *);
void ixl_set_rss_pctypes(struct ixl_pf *);
void ixl_set_rss_hlut(struct ixl_pf *);
int ixl_setup_adminq_msix(struct ixl_pf *);
int ixl_setup_adminq_tq(struct ixl_pf *);
-int ixl_teardown_adminq_msix(struct ixl_pf *);
+void ixl_teardown_adminq_msix(struct ixl_pf *);
void ixl_configure_intr0_msix(struct ixl_pf *);
void ixl_configure_queue_intr_msix(struct ixl_pf *);
void ixl_free_adminq_tq(struct ixl_pf *);
int ixl_setup_legacy(struct ixl_pf *);
int ixl_init_msix(struct ixl_pf *);
+void ixl_configure_tx_itr(struct ixl_pf *);
+void ixl_configure_rx_itr(struct ixl_pf *);
void ixl_configure_itr(struct ixl_pf *);
void ixl_configure_legacy(struct ixl_pf *);
void ixl_free_pci_resources(struct ixl_pf *);
@@ -310,6 +337,7 @@ void ixl_config_rss(struct ixl_pf *);
int ixl_set_advertised_speeds(struct ixl_pf *, int, bool);
void ixl_set_initial_advertised_speeds(struct ixl_pf *);
void ixl_print_nvm_version(struct ixl_pf *pf);
+void ixl_add_sysctls_recovery_mode(struct ixl_pf *);
void ixl_add_device_sysctls(struct ixl_pf *);
void ixl_handle_mdd_event(struct ixl_pf *);
void ixl_add_hw_stats(struct ixl_pf *);
@@ -320,9 +348,14 @@ int ixl_aq_get_link_status(struct ixl_pf *,
struct i40e_aqc_get_link_status *);
int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
+int ixl_handle_i2c_eeprom_read_cmd(struct ixl_pf *, struct ifreq *ifr);
+
+int ixl_setup_hmc(struct ixl_pf *);
+void ixl_shutdown_hmc(struct ixl_pf *);
void ixl_handle_empr_reset(struct ixl_pf *);
int ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up);
-int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
+int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *, bool is_up);
+int ixl_pf_reset(struct ixl_pf *);
void ixl_set_queue_rx_itr(struct ixl_rx_queue *);
void ixl_set_queue_tx_itr(struct ixl_tx_queue *);
@@ -344,7 +377,7 @@ int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
void ixl_update_eth_stats(struct ixl_vsi *);
void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
int ixl_initialize_vsi(struct ixl_vsi *);
-void ixl_add_ifmedia(struct ixl_vsi *, u64);
+void ixl_add_ifmedia(struct ifmedia *, u64);
int ixl_setup_queue_msix(struct ixl_vsi *);
int ixl_setup_queue_tqs(struct ixl_vsi *);
int ixl_teardown_queue_msix(struct ixl_vsi *);
@@ -388,8 +421,8 @@ s32 ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
s32 ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
u8 dev_addr, u8 data);
-int ixl_get_fw_lldp_status(struct ixl_pf *pf);
+u64 ixl_max_aq_speed_to_value(u8);
int ixl_attach_get_link_status(struct ixl_pf *);
-u64 ixl_max_aq_speed_to_value(u8);
+int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
#endif /* _IXL_PF_H_ */
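A minimal sketch of the new recovery-mode flag in use. The set side lives in ixl_pf_main.c and is not shown in this excerpt, so the atomic_set_32() call below is an assumption modeled on the FW_LLDP_DISABLED updates visible in if_ixl.c:

    /* on detecting recovery mode (assumed location): */
    atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);

    /* fast-path callbacks then bail out early: */
    if (IXL_PF_IN_RECOVERY_MODE(pf))
        return;

The acquire load inside IXL_PF_IN_RECOVERY_MODE() keeps the check coherent against concurrent updates from the admin path without taking an extra lock.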
diff --git a/sys/dev/ixl/ixl_pf_i2c.c b/sys/dev/ixl/ixl_pf_i2c.c
index 55062f7f7e2a..9aea32bbe5ce 100644
--- a/sys/dev/ixl/ixl_pf_i2c.c
+++ b/sys/dev/ixl/ixl_pf_i2c.c
@@ -606,7 +606,7 @@ write_byte_out:
}
/**
- * ixl_read_i2c_byte - Reads 8 bit word over I2C using a hardware register
+ * ixl_read_i2c_byte_reg - Reads 8 bit word over I2C using a hardware register
**/
s32
ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
@@ -627,7 +627,7 @@ ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
/* Get data from I2C register */
reg = rd32(hw, I40E_GLGEN_I2CCMD(hw->func_caps.mdio_port_num));
- /* Retrieve data readed from EEPROM */
+ /* Retrieve data read from EEPROM */
*data = (u8)(reg & 0xff);
if (status)
@@ -636,7 +636,7 @@ ixl_read_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
}
/**
- * ixl_write_i2c_byte - Writes 8 bit word over I2C using a hardware register
+ * ixl_write_i2c_byte_reg - Writes 8 bit word over I2C using a hardware register
**/
s32
ixl_write_i2c_byte_reg(struct ixl_pf *pf, u8 byte_offset,
@@ -694,7 +694,7 @@ ixl_wait_for_i2c_completion(struct i40e_hw *hw, u8 portnum)
}
/**
- * ixl_read_i2c_byte - Reads 8 bit word over I2C using a hardware register
+ * ixl_read_i2c_byte_aq - Reads 8 bit word over I2C using an AQ command
**/
s32
ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
@@ -706,7 +706,7 @@ ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
status = i40e_aq_get_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- dev_addr,
+ dev_addr, false,
byte_offset,
&reg, NULL);
@@ -720,7 +720,7 @@ ixl_read_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
}
/**
- * ixl_write_i2c_byte - Writes 8 bit word over I2C using a hardware register
+ * ixl_write_i2c_byte_aq - Writes 8 bit word over I2C using an AQ command
**/
s32
ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
@@ -731,7 +731,7 @@ ixl_write_i2c_byte_aq(struct ixl_pf *pf, u8 byte_offset,
status = i40e_aq_set_phy_register(hw,
I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
- dev_addr,
+ dev_addr, false,
byte_offset,
data, NULL);
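
The AQ-based accessors above pick up a new boolean argument in the middle of the i40e_aq_get_phy_register()/i40e_aq_set_phy_register() parameter list, added with the FW API 1.10 update. A minimal sketch of a read through the new call shape; dev_addr and byte_offset are assumed to be caller-supplied, the surrounding function is omitted, and the bool is assumed here to request no page change before the access:

	struct i40e_hw *hw = &pf->hw;
	u32 reg = 0;
	u8 data = 0;
	enum i40e_status_code status;

	/* Read one byte over the AQ; the new fourth argument (false)
	 * leaves the module page selection untouched. */
	status = i40e_aq_get_phy_register(hw,
	    I40E_AQ_PHY_REG_ACCESS_EXTERNAL_MODULE,
	    dev_addr, false, byte_offset, &reg, NULL);
	if (status == I40E_SUCCESS)
		data = (u8)(reg & 0xff);
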
diff --git a/sys/dev/ixl/ixl_pf_iflib.c b/sys/dev/ixl/ixl_pf_iflib.c
new file mode 100644
index 000000000000..4351f65ee5ab
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_iflib.c
@@ -0,0 +1,1137 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2020, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixl_pf.h"
+
+void
+ixl_configure_tx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_tx_queue *que = vsi->tx_queues;
+
+ vsi->tx_itr_setting = pf->tx_itr;
+
+ for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+void
+ixl_configure_rx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ vsi->rx_itr_setting = pf->rx_itr;
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+int
+ixl_intr(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+ u32 icr0;
+
+ ++que->irqs;
+
+ /* Clear PBA at start of ISR if using legacy interrupts */
+ if (vsi->shared->isc_intr == IFLIB_INTR_LEGACY)
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+
+#ifdef PCI_IOV
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
+ iflib_iov_intr_deferred(vsi->ctx);
+#endif
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
+ iflib_admin_intr_deferred(vsi->ctx);
+
+ ixl_enable_intr0(hw);
+
+ if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
+ return (FILTER_SCHEDULE_THREAD);
+ else
+ return (FILTER_HANDLED);
+}
+
+/*********************************************************************
+ *
+ * MSI-X VSI Interrupt Service routine
+ *
+ **********************************************************************/
+int
+ixl_msix_que(void *arg)
+{
+ struct ixl_rx_queue *rx_que = arg;
+
+ ++rx_que->irqs;
+
+ ixl_set_queue_rx_itr(rx_que);
+
+ return (FILTER_SCHEDULE_THREAD);
+}
+
+/*********************************************************************
+ *
+ * MSI-X Admin Queue Interrupt Service routine
+ *
+ **********************************************************************/
+int
+ixl_msix_adminq(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u32 reg, mask, rstat_reg;
+ bool do_task = FALSE;
+
+ DDPRINTF(dev, "begin");
+
+ ++pf->admin_irq;
+
+ reg = rd32(hw, I40E_PFINT_ICR0);
+ /*
+ * For masking off interrupt causes that need to be handled before
+ * they can be re-enabled
+ */
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* Check on the cause */
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_GRST_MASK) {
+ const char *reset_type;
+ mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
+ rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
+ rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ switch (rstat_reg) {
+ /* These others might be handled similarly to an EMPR reset */
+ case I40E_RESET_CORER:
+ reset_type = "CORER";
+ break;
+ case I40E_RESET_GLOBR:
+ reset_type = "GLOBR";
+ break;
+ case I40E_RESET_EMPR:
+ reset_type = "EMPR";
+ break;
+ default:
+ reset_type = "POR";
+ break;
+ }
+ device_printf(dev, "Reset Requested! (%s)\n", reset_type);
+ /* overload admin queue task to check reset progress */
+ atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
+ do_task = TRUE;
+ }
+
+ /*
+ * PE / PCI / ECC exceptions are all handled in the same way:
+ * mask out these three causes, then request a PF reset
+ */
+ if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
+ device_printf(dev, "ECC Error detected!\n");
+ if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
+ device_printf(dev, "PCI Exception detected!\n");
+ if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
+ device_printf(dev, "Critical Protocol Engine Error detected!\n");
+ /* Checks against the conditions above */
+ if (reg & IXL_ICR0_CRIT_ERR_MASK) {
+ mask &= ~IXL_ICR0_CRIT_ERR_MASK;
+ atomic_set_32(&pf->state,
+ IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ reg = rd32(hw, I40E_PFHMC_ERRORINFO);
+ if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
+ device_printf(dev, "HMC Error detected!\n");
+ device_printf(dev, "INFO 0x%08x\n", reg);
+ reg = rd32(hw, I40E_PFHMC_ERRORDATA);
+ device_printf(dev, "DATA 0x%08x\n", reg);
+ wr32(hw, I40E_PFHMC_ERRORINFO, 0);
+ }
+ }
+
+#ifdef PCI_IOV
+ if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ iflib_iov_intr_deferred(pf->vsi.ctx);
+ }
+#endif
+
+ wr32(hw, I40E_PFINT_ICR0_ENA, mask);
+ ixl_enable_intr0(hw);
+
+ if (do_task)
+ return (FILTER_SCHEDULE_THREAD);
+ else
+ return (FILTER_HANDLED);
+}
+
+/*
+ * Configure queue interrupt cause registers in hardware.
+ *
+ * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
+ */
+void
+ixl_configure_queue_intr_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
+
+ for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
+ /* Make sure interrupt is disabled */
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
+ /* Set linked list head to point to corresponding RX queue
+ * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
+ reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
+ & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
+ ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
+ & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
+ wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
+
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(i), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(i), reg);
+ }
+}
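
Each write above composes a register value from (field << SHIFT) & MASK pairs. A standalone illustration of the same packing pattern, compilable in user space; the shift values below are placeholders, not the real I40E_QINT_RQCTL layout from i40e_register.h:

#include <stdio.h>

/* Placeholder field positions, standing in for i40e_register.h. */
#define RQCTL_MSIX_INDX_SHIFT	0
#define RQCTL_ITR_INDX_SHIFT	11
#define RQCTL_NEXTQ_INDX_SHIFT	16
#define RQCTL_NEXTQ_TYPE_SHIFT	27
#define RQCTL_CAUSE_ENA		(1u << 30)

static unsigned int
pack_rqctl(unsigned int vector, unsigned int itr_idx,
    unsigned int nextq, unsigned int nextq_type)
{
	return (RQCTL_CAUSE_ENA |
	    (itr_idx << RQCTL_ITR_INDX_SHIFT) |
	    (vector << RQCTL_MSIX_INDX_SHIFT) |
	    (nextq << RQCTL_NEXTQ_INDX_SHIFT) |
	    (nextq_type << RQCTL_NEXTQ_TYPE_SHIFT));
}

int
main(void)
{
	/* Queue 0: vector 1, RX ITR index 0, next cause is TX queue 0. */
	printf("RQCTL(0) = 0x%08x\n", pack_rqctl(1, 0, 0, 1));
	return (0);
}
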
+
+/*
+ * Configure for single interrupt vector operation
+ */
+void
+ixl_configure_legacy(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+
+ vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
+
+ /* Setup "other" causes */
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+ | I40E_PFINT_ICR0_ENA_GRST_MASK
+ | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+ | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+ | I40E_PFINT_ICR0_ENA_VFLR_MASK
+ | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+ ;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /* No ITR for non-queue interrupts */
+ wr32(hw, I40E_PFINT_STAT_CTL0,
+ IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+ | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+ | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+}
+
+void
+ixl_free_pci_resources(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
+
+ /* We may get here before stations are set up */
+ if (rx_que == NULL)
+ goto early;
+
+ /*
+ ** Release all MSI-X VSI resources:
+ */
+ iflib_irq_free(vsi->ctx, &vsi->irq);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
+ iflib_irq_free(vsi->ctx, &rx_que->que_irq);
+early:
+ if (pf->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rman_get_rid(pf->pci_mem), pf->pci_mem);
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+int
+ixl_setup_interface(device_t dev, struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ if_ctx_t ctx = vsi->ctx;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = iflib_get_ifp(ctx);
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code aq_error = 0;
+
+ INIT_DBG_DEV(dev, "begin");
+
+ vsi->shared->isc_max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ if (IXL_PF_IN_RECOVERY_MODE(pf))
+ goto only_auto;
+
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, TRUE, &abilities, NULL);
+ /* May need delay to detect fiber correctly */
+ if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ i40e_msec_delay(200);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+ TRUE, &abilities, NULL);
+ }
+ if (aq_error) {
+ if (aq_error == I40E_ERR_UNKNOWN_PHY)
+ device_printf(dev, "Unknown PHY type detected!\n");
+ else
+ device_printf(dev,
+ "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ } else {
+ pf->supported_speeds = abilities.link_speed;
+ if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
+
+ ixl_add_ifmedia(vsi->media, hw->phy.phy_types);
+ }
+
+only_auto:
+ /* Use autoselect media by default */
+ ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
+
+ return (0);
+}
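
isc_max_frame_size above is the MTU plus the Ethernet framing the hardware still has to buffer: header, CRC, and one VLAN tag. A quick standalone check of the arithmetic using the standard net/ethernet.h constants:

#include <stdio.h>

#define ETHER_HDR_LEN		14	/* dst + src + ethertype */
#define ETHER_CRC_LEN		4
#define ETHER_VLAN_ENCAP_LEN	4	/* 802.1Q tag */

int
main(void)
{
	int mtu = 1500;

	/* 1500 + 14 + 4 + 4 = 1522 */
	printf("max frame size = %d\n",
	    mtu + ETHER_HDR_LEN + ETHER_CRC_LEN + ETHER_VLAN_ENCAP_LEN);
	return (0);
}
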
+
+/*
+** Run when the Admin Queue gets a link state change interrupt.
+*/
+void
+ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = iflib_get_dev(pf->vsi.ctx);
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+
+ /* Request link status from adapter */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ /* Print out message if an unqualified module is found */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (pf->advertised_speed) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)))
+ device_printf(dev, "Link failed because "
+ "an unqualified module was detected!\n");
+
+ /* OS link info is updated elsewhere */
+}
+
+/*********************************************************************
+ *
+ * Initialize the VSI: this handles contexts, which means things
+ * like the number of descriptors and buffer size; the rings
+ * are also initialized through this function.
+ *
+ **********************************************************************/
+int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
+ struct ixl_tx_queue *tx_que = vsi->tx_queues;
+ struct ixl_rx_queue *rx_que = vsi->rx_queues;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ struct i40e_hw *hw = vsi->hw;
+ struct i40e_vsi_context ctxt;
+ int tc_queues;
+ int err = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ if (pf->veb_seid != 0)
+ ctxt.uplink_seid = pf->veb_seid;
+ ctxt.pf_num = hw->pf_id;
+ err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+ ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
+ "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
+ "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
+ "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
+ ctxt.uplink_seid, ctxt.vsi_number,
+ ctxt.vsis_allocated, ctxt.vsis_unallocated,
+ ctxt.flags, ctxt.pf_num, ctxt.vf_num,
+ ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
+ /*
+ ** Set the queue and traffic class bits
+ ** - when multiple traffic classes are supported
+ ** this will need to be more robust.
+ */
+ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
+ /* In contig mode, que_mapping[0] is first queue index used by this VSI */
+ ctxt.info.queue_mapping[0] = 0;
+ /*
+ * This VSI will only use traffic class 0; start traffic class 0's
+ * queue allocation at queue 0, and assign it 2^tc_queues queues (though
+ * the driver may not use all of them).
+ */
+ tc_queues = fls(pf->qtag.num_allocated) - 1;
+ ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+ ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
+
+ /* Set VLAN receive stripping mode */
+ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+ ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+#ifdef IXL_IW
+ /* Set TCP Enable for iWARP capable VSI */
+ if (ixl_enable_iwarp && pf->iw_enabled) {
+ ctxt.info.valid_sections |=
+ htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
+ ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
+ }
+#endif
+ /* Save VSI number and info for use later */
+ vsi->vsi_num = ctxt.vsi_number;
+ bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+
+ ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
+
+ err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+
+ for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
+ struct tx_ring *txr = &tx_que->txr;
+ struct i40e_hmc_obj_txq tctx;
+ u32 txctl;
+
+ /* Setup the HMC TX Context */
+ bzero(&tctx, sizeof(tctx));
+ tctx.new_context = 1;
+ tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
+ tctx.qlen = scctx->isc_ntxd[0];
+ tctx.fc_ena = 0; /* Disable FCoE */
+ /*
+ * This value needs to be pulled from the VSI that this queue
+ * is assigned to. Index into array is traffic class.
+ */
+ tctx.rdylist = vsi->info.qs_handle[0];
+ /*
+ * Set these to enable Head Writeback
+ * - Address is last entry in TX ring (reserved for HWB index)
+ * Leave these as 0 for Descriptor Writeback
+ */
+ if (vsi->enable_head_writeback) {
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->tx_paddr +
+ (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
+ } else {
+ tctx.head_wb_ena = 0;
+ tctx.head_wb_addr = 0;
+ }
+ tctx.rdylist_act = 0;
+ err = i40e_clear_lan_tx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev, "Unable to clear TX context\n");
+ break;
+ }
+ err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
+ if (err) {
+ device_printf(dev, "Unable to set TX context\n");
+ break;
+ }
+ /* Associate the ring with this PF */
+ txctl = I40E_QTX_CTL_PF_QUEUE;
+ txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(i), txctl);
+ ixl_flush(hw);
+
+ /* Do ring (re)init */
+ ixl_init_tx_ring(vsi, tx_que);
+ }
+ for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
+ struct rx_ring *rxr = &rx_que->rxr;
+ struct i40e_hmc_obj_rxq rctx;
+
+ /* Next setup the HMC RX Context */
+ rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
+
+ u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
+
+ /* Set up an RX context for the HMC */
+ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ /* ignore header split for now */
+ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
+ scctx->isc_max_frame_size : max_rxmax;
+ rctx.dtype = 0;
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no header split */
+ rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
+ rctx.qlen = scctx->isc_nrxd[0];
+ rctx.tphrdesc_ena = 1;
+ rctx.tphwdesc_ena = 1;
+ rctx.tphdata_ena = 0; /* Header Split related */
+ rctx.tphhead_ena = 0; /* Header Split related */
+ rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
+ rctx.crcstrip = 1;
+ rctx.l2tsel = 1;
+ rctx.showiv = 1; /* Strip inner VLAN header */
+ rctx.fc_ena = 0; /* Disable FCoE */
+ rctx.prefena = 1; /* Prefetch descriptors */
+
+ err = i40e_clear_lan_rx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev,
+ "Unable to clear RX context %d\n", i);
+ break;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
+ if (err) {
+ device_printf(dev, "Unable to set RX context %d\n", i);
+ break;
+ }
+ wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
+ }
+ return (err);
+}
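
The tc_queues computation above relies on fls(): with a power-of-two allocation, fls(n) - 1 is log2(n), which is what the TC_QUE_NUMBER field encodes. A small portable sketch of that encoding, with fls() re-implemented for user space:

#include <stdio.h>

/* Stand-in for the kernel's fls(): 1-based index of the highest set
 * bit, or 0 when the input is 0. */
static int
fls_portable(unsigned int x)
{
	int i = 0;

	while (x != 0) {
		i++;
		x >>= 1;
	}
	return (i);
}

int
main(void)
{
	unsigned int num_allocated = 8;	/* queues reserved for the VSI */

	/* 8 allocated queues -> field value 3, since 2^3 = 8 */
	printf("tc_queues = %d\n", fls_portable(num_allocated) - 1);
	return (0);
}
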
+
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (pf->dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
+
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ rxr->latency = rx_latency;
+
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
+ rxr->itr = min(rx_itr, IXL_MAX_ITR);
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ rxr->me), rxr->itr);
+ }
+ } else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = pf->rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ rxr->me), rxr->itr);
+ }
+ }
+ rxr->bytes = 0;
+ rxr->packets = 0;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ u16 tx_itr;
+ u16 tx_latency = 0;
+ int tx_bytes;
+
+
+ /* Idle, do nothing */
+ if (txr->bytes == 0)
+ return;
+
+ if (pf->dynamic_tx_itr) {
+ tx_bytes = txr->bytes/txr->itr;
+ tx_itr = txr->itr;
+
+ switch (txr->latency) {
+ case IXL_LOW_LATENCY:
+ if (tx_bytes > 10) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (tx_bytes > 20) {
+ tx_latency = IXL_BULK_LATENCY;
+ tx_itr = IXL_ITR_8K;
+ } else if (tx_bytes <= 10) {
+ tx_latency = IXL_LOW_LATENCY;
+ tx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (tx_bytes <= 20) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ txr->latency = tx_latency;
+
+ if (tx_itr != txr->itr) {
+ /* do an exponential smoothing */
+ tx_itr = (10 * tx_itr * txr->itr) /
+ ((9 * tx_itr) + txr->itr);
+ txr->itr = min(tx_itr, IXL_MAX_ITR);
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ txr->me), txr->itr);
+ }
+
+ } else { /* We may have toggled to non-dynamic */
+ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->tx_itr_setting = pf->tx_itr;
+ /* Update the hardware if needed */
+ if (txr->itr != vsi->tx_itr_setting) {
+ txr->itr = vsi->tx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ txr->me), txr->itr);
+ }
+ }
+ txr->bytes = 0;
+ txr->packets = 0;
+ return;
+}
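
Both dynamic ITR paths smooth toward the new target with the same weighted step, next = (10 * target * cur) / (9 * target + cur), so the interval drifts rather than jumps when traffic changes latency class. A standalone sketch showing the convergence of that recurrence:

#include <stdio.h>

static unsigned int
smooth_itr(unsigned int target, unsigned int cur)
{
	return ((10 * target * cur) / ((9 * target) + cur));
}

int
main(void)
{
	unsigned int itr = 100;		/* current register value */
	unsigned int target = 8;	/* new target, e.g. bulk latency */

	for (int i = 0; i < 6; i++) {
		itr = smooth_itr(target, itr);
		printf("step %d: itr = %u\n", i, itr);
	}
	return (0);
}
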
+
+#ifdef IXL_DEBUG
+/**
+ * ixl_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_tx_queue *tx_que;
+ int error;
+ u32 val;
+
+ tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
+ if (!tx_que) return 0;
+
+ val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+
+/**
+ * ixl_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_rx_queue *rx_que;
+ int error;
+ u32 val;
+
+ rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
+ if (!rx_que) return 0;
+
+ val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
+ error = sysctl_handle_int(oidp, &val, 0, req);
+ if (error || !req->newptr)
+ return error;
+ return (0);
+}
+#endif
+
+void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+
+ /* Driver statistics */
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &pf->admin_irq,
+ "Admin Queue IRQs received");
+
+ sysctl_ctx_init(&vsi->sysctl_ctx);
+ ixl_vsi_add_sysctls(vsi, "pf", true);
+
+ ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+void
+ixl_set_rss_hlut(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = iflib_get_dev(vsi->ctx);
+ int i, que_id;
+ int lut_entry_width;
+ u32 lut = 0;
+ enum i40e_status_code status;
+
+ lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
+
+ /* Populate the LUT with max no. of queues in round robin fashion */
+ u8 hlut_buf[512];
+ for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % vsi->num_rx_queues;
+#else
+ que_id = i % vsi->num_rx_queues;
+#endif
+ lut = (que_id & ((0x1 << lut_entry_width) - 1));
+ hlut_buf[i] = lut;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
+ if (status)
+ device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
+ ixl_flush(hw);
+ }
+}
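
The fill above is plain round-robin over the RX queues, with each entry masked down to the LUT entry width reported in the function capabilities. A user-space sketch of the same fill; table size, entry width, and queue count are example values rather than values read from hardware:

#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint8_t hlut[512];
	int table_size = 512;		/* example rss_table_size */
	int entry_width = 7;		/* example rss_table_entry_width */
	int num_rx_queues = 6;

	for (int i = 0; i < table_size; i++)
		hlut[i] = (i % num_rx_queues) & ((1 << entry_width) - 1);

	/* First eight entries: 0 1 2 3 4 5 0 1 */
	for (int i = 0; i < 8; i++)
		printf("%d ", hlut[i]);
	printf("\n");
	return (0);
}
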
+
+/*
+** This routine updates VLAN filters. Called by init,
+** it scans the filter table and then updates the hardware
+** after a soft reset.
+*/
+void
+ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+ int cnt = 0, flags;
+
+ if (vsi->num_vlans == 0)
+ return;
+ /*
+ ** Scan the filter list for vlan entries,
+ ** mark them for addition and then call
+ ** for the AQ update.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags & IXL_FILTER_VLAN) {
+ f->flags |=
+ (IXL_FILTER_ADD |
+ IXL_FILTER_USED);
+ cnt++;
+ }
+ }
+ if (cnt == 0) {
+ printf("setup vlan: no filters found!\n");
+ return;
+ }
+ flags = IXL_FILTER_VLAN;
+ flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ ixl_add_hw_filters(vsi, flags, cnt);
+}
+
+/* For PF VSI only */
+int
+ixl_enable_rings(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_tx_queues; i++)
+ error = ixl_enable_tx_ring(pf, &pf->qtag, i);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ error = ixl_enable_rx_ring(pf, &pf->qtag, i);
+
+ return (error);
+}
+
+int
+ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
+{
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_tx_queues; i++)
+ error = ixl_disable_tx_ring(pf, qtag, i);
+
+ for (int i = 0; i < vsi->num_rx_queues; i++)
+ error = ixl_disable_rx_ring(pf, qtag, i);
+
+ return (error);
+}
+
+void
+ixl_enable_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ ixl_enable_queue(hw, que->rxr.me);
+ } else
+ ixl_enable_intr0(hw);
+}
+
+void
+ixl_disable_rings_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_rx_queue *que = vsi->rx_queues;
+
+ for (int i = 0; i < vsi->num_rx_queues; i++, que++)
+ ixl_disable_queue(hw, que->rxr.me);
+}
+
+int
+ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ if (is_up)
+ ixl_if_stop(pf->vsi.ctx);
+
+ ixl_shutdown_hmc(pf);
+
+ ixl_disable_intr0(hw);
+
+ error = i40e_shutdown_adminq(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown Admin queue failed with code %d\n", error);
+
+ ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
+ return (error);
+}
+
+int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf, bool is_up)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ enum i40e_get_fw_lldp_status_resp lldp_status;
+ int error = 0;
+
+ device_printf(dev, "Rebuilding driver state...\n");
+
+ /* Setup */
+ error = i40e_init_adminq(hw);
+ if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
+ error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ /* Keep admin queue interrupts active while driver is loaded */
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
+ return (0);
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ error = ixl_get_hw_capabilities(pf);
+ if (error) {
+ device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ error = ixl_setup_hmc(pf);
+ if (error)
+ goto ixl_rebuild_hw_structs_after_reset_err;
+
+ /* reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
+ if (error) {
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
+ }
+
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
+ error);
+ error = EIO;
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
+ NULL);
+ if (error) {
+ device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
+ " aq_err %d\n", error, hw->aq.asq_last_status);
+ error = EIO;
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ u8 set_fc_err_mask;
+ error = i40e_set_fc(hw, &set_fc_err_mask, true);
+ if (error) {
+ device_printf(dev, "init: setting link flow control failed; retcode %d,"
+ " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
+ error = EIO;
+ goto ixl_rebuild_hw_structs_after_reset_err;
+ }
+
+ /* Remove default filters reinstalled by FW on reset */
+ ixl_del_default_hw_filters(vsi);
+
+ /* Receive broadcast Ethernet frames */
+ i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
+
+ /* Determine link state */
+ if (ixl_attach_get_link_status(pf)) {
+ error = EINVAL;
+ }
+
+ i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
+
+ /* Query device FW LLDP status */
+ if (i40e_get_fw_lldp_status(hw, &lldp_status) == I40E_SUCCESS) {
+ if (lldp_status == I40E_GET_FW_LLDP_STATUS_DISABLED) {
+ atomic_set_32(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ } else {
+ atomic_clear_32(&pf->state,
+ IXL_PF_STATE_FW_LLDP_DISABLED);
+ }
+ }
+
+ /* Keep admin queue interrupts active while driver is loaded */
+ if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_intr0(hw);
+ }
+
+ if (is_up) {
+ iflib_request_reset(vsi->ctx);
+ iflib_admin_intr_deferred(vsi->ctx);
+ }
+
+ device_printf(dev, "Rebuilding driver state done.\n");
+ return (0);
+
+ixl_rebuild_hw_structs_after_reset_err:
+ device_printf(dev, "Reload the driver to recover\n");
+ return (error);
+}
+
+/*
+** Set flow control using sysctl:
+** 0 - off
+** 1 - rx pause
+** 2 - tx pause
+** 3 - full
+*/
+int
+ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_fc, error = 0;
+ enum i40e_status_code aq_error = 0;
+ u8 fc_aq_err = 0;
+
+ /* Get request */
+ requested_fc = pf->fc;
+ error = sysctl_handle_int(oidp, &requested_fc, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_fc < 0 || requested_fc > 3) {
+ device_printf(dev,
+ "Invalid fc mode; valid modes are 0 through 3\n");
+ return (EINVAL);
+ }
+
+ /* Set fc ability for port */
+ hw->fc.requested_mode = requested_fc;
+ aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new fc mode %d; fc_err %#x\n",
+ __func__, aq_error, fc_aq_err);
+ return (EIO);
+ }
+ pf->fc = requested_fc;
+
+ return (0);
+}
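
The mode values accepted by the handler form a two-bit encoding: bit 0 is RX pause and bit 1 is TX pause, so 3 means full flow control. A tiny decoder sketch of that 0-3 encoding:

#include <stdio.h>

int
main(void)
{
	const char *modes[] = { "off", "rx pause", "tx pause", "full" };

	for (int fc = 0; fc <= 3; fc++)
		printf("%d: %-8s (rx=%d tx=%d)\n", fc, modes[fc],
		    fc & 1, (fc >> 1) & 1);
	return (0);
}
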
diff --git a/sys/dev/ixl/ixl_pf_iov.c b/sys/dev/ixl/ixl_pf_iov.c
index e3184eea22f6..92e434eab9fc 100644
--- a/sys/dev/ixl/ixl_pf_iov.c
+++ b/sys/dev/ixl/ixl_pf_iov.c
@@ -39,9 +39,6 @@ static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum
static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
-static bool ixl_zero_mac(const uint8_t *addr);
-static bool ixl_bcast_mac(const uint8_t *addr);
-
static int ixl_vc_opcode_level(uint16_t opcode);
static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
@@ -117,8 +114,9 @@ ixl_initialize_sriov(struct ixl_pf *pf)
iov_error);
} else
device_printf(dev, "SR-IOV ready\n");
-}
+ pf->vc_debug_lvl = 1;
+}
/*
* Allocate the VSI for a VF.
@@ -203,20 +201,21 @@ ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
int error;
hw = &pf->hw;
+ vf->vsi.flags |= IXL_FLAGS_IS_VF;
error = ixl_vf_alloc_vsi(pf, vf);
if (error != 0)
return (error);
+ vf->vsi.dev = pf->dev;
+
+ ixl_init_filters(&vf->vsi);
/* Let VF receive broadcast Ethernet frames */
error = i40e_aq_set_vsi_broadcast(hw, vf->vsi.seid, TRUE, NULL);
if (error)
device_printf(pf->dev, "Error configuring VF VSI for broadcast promiscuous\n");
/* Re-add VF's MAC/VLAN filters to its VSI */
ixl_reconfigure_filters(&vf->vsi);
- /* Reset stats? */
- vf->vsi.hw_filters_add = 0;
- vf->vsi.hw_filters_del = 0;
return (0);
}
@@ -488,33 +487,36 @@ static void
ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
- struct virtchnl_version_info reply;
+ struct virtchnl_version_info *recv_vf_version;
+ device_t dev = pf->dev;
- if (msg_size != sizeof(struct virtchnl_version_info)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_VERSION,
- I40E_ERR_PARAM);
- return;
- }
+ recv_vf_version = (struct virtchnl_version_info *)msg;
- vf->version = ((struct virtchnl_version_info *)msg)->minor;
+ /* VFs running the 1.0 API expect to get 1.0 back */
+ if (VF_IS_V10(recv_vf_version)) {
+ vf->version.major = 1;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR_NO_VF_CAPS;
+ } else {
+ vf->version.major = VIRTCHNL_VERSION_MAJOR;
+ vf->version.minor = VIRTCHNL_VERSION_MINOR;
+
+ if ((recv_vf_version->major != VIRTCHNL_VERSION_MAJOR) ||
+ (recv_vf_version->minor != VIRTCHNL_VERSION_MINOR))
+ device_printf(dev,
+ "%s: VF-%d requested version (%d.%d) differs from PF version (%d.%d)\n",
+ __func__, vf->vf_num,
+ recv_vf_version->major, recv_vf_version->minor,
+ VIRTCHNL_VERSION_MAJOR, VIRTCHNL_VERSION_MINOR);
+ }
- reply.major = VIRTCHNL_VERSION_MAJOR;
- reply.minor = VIRTCHNL_VERSION_MINOR;
- ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
- sizeof(reply));
+ ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_VERSION, I40E_SUCCESS,
+ &vf->version, sizeof(vf->version));
}
static void
ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
uint16_t msg_size)
{
-
- if (msg_size != 0) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_RESET_VF,
- I40E_ERR_PARAM);
- return;
- }
-
ixl_reset_vf(pf, vf);
/* No response to a reset message. */
@@ -526,19 +528,9 @@ ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vf_resource reply;
- if ((vf->version == 0 && msg_size != 0) ||
- (vf->version == 1 && msg_size != 4)) {
- device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
- " for VF version %d.%d\n", VIRTCHNL_VERSION_MAJOR,
- vf->version);
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_ERR_PARAM);
- return;
- }
-
bzero(&reply, sizeof(reply));
- if (vf->version == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ if (vf->version.minor == VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
reply.vf_cap_flags = VIRTCHNL_VF_OFFLOAD_L2 |
VIRTCHNL_VF_OFFLOAD_RSS_REG |
VIRTCHNL_VF_OFFLOAD_VLAN;
@@ -681,15 +673,8 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vsi_queue_config_info *info;
struct virtchnl_queue_pair_info *pair;
- uint16_t expected_msg_size;
int i;
- if (msg_size < sizeof(*info)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
info = msg;
if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_tx_queues) {
device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
@@ -699,15 +684,6 @@ ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
return;
}
- expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
- if (msg_size != expected_msg_size) {
- device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
- vf->vf_num, msg_size, expected_msg_size);
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
if (info->vsi_id != vf->vsi.vsi_num) {
device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
@@ -839,25 +815,7 @@ ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
int i, largest_txq, largest_rxq;
hw = &pf->hw;
-
- if (msg_size < sizeof(*map)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
map = msg;
- if (map->num_vectors == 0) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
for (i = 0; i < map->num_vectors; i++) {
vector = &map->vecmap[i];
@@ -910,13 +868,8 @@ ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_queue_select *select;
int error = 0;
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
select = msg;
+
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ENABLE_QUEUES,
@@ -989,13 +942,8 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
struct virtchnl_queue_select *select;
int error = 0;
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
select = msg;
+
if (select->vsi_id != vf->vsi.vsi_num ||
select->rx_queues == 0 || select->tx_queues == 0) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES,
@@ -1064,28 +1012,11 @@ ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
ixl_send_vf_ack(pf, vf, VIRTCHNL_OP_DISABLE_QUEUES);
}
-static bool
-ixl_zero_mac(const uint8_t *addr)
-{
- uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
-
- return (cmp_etheraddr(addr, zero));
-}
-
-static bool
-ixl_bcast_mac(const uint8_t *addr)
-{
- static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
- return (cmp_etheraddr(addr, ixl_bcast_addr));
-}
-
static int
ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
{
- if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
+ if (ETHER_IS_ZERO(addr) || ETHER_IS_BROADCAST(addr))
return (EINVAL);
/*
@@ -1108,23 +1039,11 @@ ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_ether_addr *addr;
struct ixl_vsi *vsi;
int i;
- size_t expected_size;
vsi = &vf->vsi;
-
- if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- I40E_ERR_PARAM);
- return;
- }
-
addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vsi->vsi_num ||
- msg_size != expected_size) {
+ if (addr_list->vsi_id != vsi->vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
I40E_ERR_PARAM);
return;
@@ -1152,32 +1071,23 @@ ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_ether_addr_list *addr_list;
struct virtchnl_ether_addr *addr;
- size_t expected_size;
+ struct ixl_vsi *vsi;
int i;
- if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
- I40E_ERR_PARAM);
- return;
- }
-
+ vsi = &vf->vsi;
addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_ETH_ADDR,
+ if (addr_list->vsi_id != vsi->vsi_num) {
+ i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_ETH_ADDR,
I40E_ERR_PARAM);
return;
}
for (i = 0; i < addr_list->num_elements; i++) {
addr = &addr_list->list[i];
- if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
+ if (ETHER_IS_ZERO(addr->addr) || ETHER_IS_BROADCAST(addr->addr)) {
i40e_send_vf_nack(pf, vf,
- VIRTCHNL_OP_ADD_ETH_ADDR, I40E_ERR_PARAM);
+ VIRTCHNL_OP_DEL_ETH_ADDR, I40E_ERR_PARAM);
return;
}
}
@@ -1210,21 +1120,11 @@ ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vlan_filter_list *filter_list;
enum i40e_status_code code;
- size_t expected_size;
int i;
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
+
+ if (filter_list->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_ADD_VLAN,
I40E_ERR_PARAM);
return;
@@ -1262,20 +1162,10 @@ ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_vlan_filter_list *filter_list;
int i;
- size_t expected_size;
-
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
+
+ if (filter_list->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_DEL_VLAN,
I40E_ERR_PARAM);
return;
@@ -1309,12 +1199,6 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
struct i40e_hw *hw = &pf->hw;
enum i40e_status_code code;
- if (msg_size != sizeof(*info)) {
- i40e_send_vf_nack(pf, vf,
- VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
/*
* Do the same thing as the Linux PF driver -- lie to the VF
@@ -1362,12 +1246,6 @@ ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
{
struct virtchnl_queue_select *queue;
- if (msg_size != sizeof(*queue)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
- I40E_ERR_PARAM);
- return;
- }
-
queue = msg;
if (queue->vsi_id != vf->vsi.vsi_num) {
i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_GET_STATS,
@@ -1392,12 +1270,6 @@ ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hw = &pf->hw;
- if (msg_size < sizeof(*key)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_KEY,
- I40E_ERR_PARAM);
- return;
- }
-
key = msg;
if (key->key_len > 52) {
@@ -1454,12 +1326,6 @@ ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
hw = &pf->hw;
- if (msg_size < sizeof(*lut)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_CONFIG_RSS_LUT,
- I40E_ERR_PARAM);
- return;
- }
-
lut = msg;
if (lut->lut_entries > 64) {
@@ -1507,13 +1373,6 @@ ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
struct virtchnl_rss_hena *hena;
hw = &pf->hw;
-
- if (msg_size < sizeof(*hena)) {
- i40e_send_vf_nack(pf, vf, VIRTCHNL_OP_SET_RSS_HENA,
- I40E_ERR_PARAM);
- return;
- }
-
hena = msg;
/* Set HENA */
@@ -1537,7 +1396,7 @@ ixl_notify_vf_link_state(struct ixl_pf *pf, struct ixl_vf *vf)
event.severity = PF_EVENT_SEVERITY_INFO;
event.event_data.link_event.link_status = pf->vsi.link_active;
event.event_data.link_event.link_speed =
- (enum virtchnl_link_speed)hw->phy.link_info.link_speed;
+ i40e_virtchnl_link_speed(hw->phy.link_info.link_speed);
ixl_send_vf_msg(pf, vf, VIRTCHNL_OP_EVENT, I40E_SUCCESS, &event,
sizeof(event));
@@ -1555,10 +1414,12 @@ ixl_broadcast_link_state(struct ixl_pf *pf)
void
ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
{
+ device_t dev = pf->dev;
struct ixl_vf *vf;
- void *msg;
uint16_t vf_num, msg_size;
uint32_t opcode;
+ void *msg;
+ int err;
vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
opcode = le32toh(event->desc.cookie_high);
@@ -1578,6 +1439,15 @@ ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
(vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
vf_num, msg_size);
+ /* Perform basic checks on the msg */
+ err = virtchnl_vc_validate_vf_msg(&vf->version, opcode, msg, msg_size);
+ if (err) {
+ device_printf(dev, "%s: Received invalid msg from VF-%d: opcode %d, len %d, error %d\n",
+ __func__, vf->vf_num, opcode, msg_size, err);
+ i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_PARAM);
+ return;
+ }
+
/* This must be a stray msg from a previously destroyed VF. */
if (!(vf->vf_flags & VF_FLAG_ENABLED))
return;
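
With the per-opcode size checks deleted throughout this file, one virtchnl_vc_validate_vf_msg() call now gates every VF message before dispatch. A rough standalone sketch of what such a table-driven validator does; the opcode values match virtchnl.h, but the lengths and the two-entry table are hypothetical, and the real shared validator also handles variable-length trailers and the negotiated API version:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct vc_msg_len {
	uint32_t	opcode;
	size_t		min_len;	/* hypothetical minimum sizes */
};

static const struct vc_msg_len vc_lens[] = {
	{ 6, 72 },	/* VIRTCHNL_OP_CONFIG_VSI_QUEUES */
	{ 8, 12 },	/* VIRTCHNL_OP_ENABLE_QUEUES */
};

static int
validate_vf_msg(uint32_t opcode, size_t msg_size)
{
	for (size_t i = 0; i < sizeof(vc_lens) / sizeof(vc_lens[0]); i++)
		if (vc_lens[i].opcode == opcode)
			return (msg_size >= vc_lens[i].min_len ? 0 : -1);
	return (-1);	/* unknown opcode: reject, caller NACKs the VF */
}

int
main(void)
{
	printf("%d %d\n", validate_vf_msg(8, 12), validate_vf_msg(8, 4));
	return (0);
}
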
@@ -1785,7 +1655,7 @@ ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
struct i40e_hw *hw;
struct ixl_vsi *pf_vsi;
enum i40e_status_code ret;
- int i, error;
+ int error;
hw = &pf->hw;
pf_vsi = &pf->vsi;
@@ -1797,9 +1667,6 @@ ixl_if_iov_init(if_ctx_t ctx, uint16_t num_vfs, const nvlist_t *params)
goto fail;
}
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_init(&pf->vfs[i].ctx);
-
/*
* Add the VEB and ...
* - do nothing: VEPA mode
@@ -1872,7 +1739,7 @@ ixl_if_iov_uninit(if_ctx_t ctx)
/* sysctl_ctx_free might sleep, but this func is called w/ an sx lock */
for (i = 0; i < num_vfs; i++)
- sysctl_ctx_free(&vfs[i].ctx);
+ sysctl_ctx_free(&vfs[i].vsi.sysctl_ctx);
free(vfs, M_IXL);
}
@@ -1911,8 +1778,7 @@ int
ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
{
struct ixl_pf *pf = iflib_get_softc(ctx);
- device_t dev = pf->dev;
- char sysctl_name[QUEUE_NAME_LEN];
+ char sysctl_name[IXL_QUEUE_NAME_LEN];
struct ixl_vf *vf;
const void *mac;
size_t size;
@@ -1923,7 +1789,6 @@ ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
vf->vf_num = vfnum;
vf->vsi.back = pf;
vf->vf_flags = VF_FLAG_ENABLED;
- SLIST_INIT(&vf->vsi.ftl);
/* Reserve queue allocation from PF */
vf_num_queues = nvlist_get_number(params, "num-queues");
@@ -1961,7 +1826,7 @@ ixl_if_iov_vf_add(if_ctx_t ctx, uint16_t vfnum, const nvlist_t *params)
out:
if (error == 0) {
snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
- ixl_add_vsi_sysctls(dev, &vf->vsi, &vf->ctx, sysctl_name);
+ ixl_vsi_add_sysctls(&vf->vsi, sysctl_name, false);
}
return (error);
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
index 7979aa0a69d9..e0ccdebc7cfa 100644
--- a/sys/dev/ixl/ixl_pf_main.c
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -46,9 +46,13 @@
static u8 ixl_convert_sysctl_aq_link_speed(u8, bool);
static void ixl_sbuf_print_bytes(struct sbuf *, u8 *, int, int, bool);
+static const char * ixl_link_speed_string(enum i40e_aq_link_speed);
+static u_int ixl_add_maddr(void *, struct sockaddr_dl *, u_int);
+static u_int ixl_match_maddr(void *, struct sockaddr_dl *, u_int);
+static char * ixl_switch_element_string(struct sbuf *, u8, u16);
+static enum ixl_fw_mode ixl_get_fw_mode(struct ixl_pf *);
/* Sysctls */
-static int ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_supported_speeds(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS);
@@ -76,12 +80,13 @@ static int ixl_sysctl_fec_rs_request(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fec_auto_enable(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
+
+/* Debug Sysctls */
static int ixl_sysctl_do_pf_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_core_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_queue_interrupt_table(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS);
#ifdef IXL_DEBUG
static int ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
@@ -92,10 +97,7 @@ extern int ixl_enable_iwarp;
extern int ixl_limit_iwarp_msix;
#endif
-const uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
-const char * const ixl_fc_string[6] = {
+static const char * const ixl_fc_string[6] = {
"None",
"Rx",
"Tx",
@@ -148,44 +150,144 @@ ixl_print_nvm_version(struct ixl_pf *pf)
sbuf_delete(sbuf);
}
-static void
-ixl_configure_tx_itr(struct ixl_pf *pf)
+/**
+ * ixl_get_fw_mode - Check the state of FW
+ * @pf: pointer to the PF structure
+ *
+ * Identify the state of the FW. It might be in recovery mode,
+ * which limits functionality and requires special handling
+ * from the driver.
+ *
+ * @returns FW mode (normal, recovery, unexpected EMP reset)
+ */
+static enum ixl_fw_mode
+ixl_get_fw_mode(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_tx_queue *que = vsi->tx_queues;
+ struct i40e_hw *hw = &pf->hw;
+ enum ixl_fw_mode fw_mode = IXL_FW_MODE_NORMAL;
+ u32 fwsts;
- vsi->tx_itr_setting = pf->tx_itr;
+#ifdef IXL_DEBUG
+ if (pf->recovery_mode)
+ return IXL_FW_MODE_RECOVERY;
+#endif
+ fwsts = rd32(hw, I40E_GL_FWSTS) & I40E_GL_FWSTS_FWS1B_MASK;
+
+ /* The field is set and has one of the expected values */
+ if ((fwsts >= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK &&
+ fwsts <= I40E_XL710_GL_FWSTS_FWS1B_REC_MOD_NVM_MASK) ||
+ fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_GLOBR_MASK ||
+ fwsts == I40E_X722_GL_FWSTS_FWS1B_REC_MOD_CORER_MASK)
+ fw_mode = IXL_FW_MODE_RECOVERY;
+ else {
+ if (fwsts > I40E_GL_FWSTS_FWS1B_EMPR_0 &&
+ fwsts <= I40E_GL_FWSTS_FWS1B_EMPR_10)
+ fw_mode = IXL_FW_MODE_UEMPR;
+ }
+ return (fw_mode);
+}
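
The classification above is just two range checks against the FWS1B field of GL_FWSTS, with different mask values per MAC type. A minimal standalone sketch of the same decision shape; the numeric bounds below are placeholders, the real per-MAC constants live in i40e_register.h:

#include <stdint.h>
#include <stdio.h>

enum fw_mode { FW_NORMAL, FW_RECOVERY, FW_UEMPR };

/* Placeholder bounds standing in for the XL710/X722 FWS1B constants. */
#define FWS1B_REC_LO	0x30
#define FWS1B_REC_HI	0x31
#define FWS1B_EMPR_LO	0x00
#define FWS1B_EMPR_HI	0x0A

static enum fw_mode
classify_fwsts(uint32_t fws1b)
{
	if (fws1b >= FWS1B_REC_LO && fws1b <= FWS1B_REC_HI)
		return (FW_RECOVERY);
	if (fws1b > FWS1B_EMPR_LO && fws1b <= FWS1B_EMPR_HI)
		return (FW_UEMPR);
	return (FW_NORMAL);
}

int
main(void)
{
	printf("%d %d %d\n", classify_fwsts(0x30), classify_fwsts(0x05),
	    classify_fwsts(0x100));
	return (0);
}
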
- for (int i = 0; i < vsi->num_tx_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
+/**
+ * ixl_pf_reset - Reset the PF
+ * @pf: PF structure
+ *
+ * Ensure that FW is in the right state and do the reset
+ * if needed.
+ *
+ * @returns zero on success, or an error code on failure.
+ */
+int
+ixl_pf_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ enum i40e_status_code status;
+ enum ixl_fw_mode fw_mode;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
- txr->latency = IXL_AVE_LATENCY;
+ fw_mode = ixl_get_fw_mode(pf);
+ ixl_dbg_info(pf, "%s: before PF reset FW mode: 0x%08x\n", __func__, fw_mode);
+ if (fw_mode == IXL_FW_MODE_RECOVERY) {
+ atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
+ /* Don't try to reset device if it's in recovery mode */
+ return (0);
}
+
+ status = i40e_pf_reset(hw);
+ if (status == I40E_SUCCESS)
+ return (0);
+
+ /* Check FW mode again in case it has changed while
+ * waiting for reset to complete */
+ fw_mode = ixl_get_fw_mode(pf);
+ ixl_dbg_info(pf, "%s: after PF reset FW mode: 0x%08x\n", __func__, fw_mode);
+ if (fw_mode == IXL_FW_MODE_RECOVERY) {
+ atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
+ return (0);
+ }
+
+ if (fw_mode == IXL_FW_MODE_UEMPR)
+ device_printf(pf->dev,
+ "Entering recovery mode due to repeated FW resets. This may take several minutes. Refer to the Intel(R) Ethernet Adapters and Devices User Guide.\n");
+ else
+ device_printf(pf->dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, status));
+ return (EIO);
}
-static void
-ixl_configure_rx_itr(struct ixl_pf *pf)
+/**
+ * ixl_setup_hmc - Setup LAN Host Memory Cache
+ * @pf: PF structure
+ *
+ * Init and configure LAN Host Memory Cache
+ *
+ * @returns 0 on success, EIO on error
+ */
+int
+ixl_setup_hmc(struct ixl_pf *pf)
{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_rx_queue *que = vsi->rx_queues;
-
- vsi->rx_itr_setting = pf->rx_itr;
+ struct i40e_hw *hw = &pf->hw;
+ enum i40e_status_code status;
- for (int i = 0; i < vsi->num_rx_queues; i++, que++) {
- struct rx_ring *rxr = &que->rxr;
+ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (status) {
+ device_printf(pf->dev, "init_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
+ return (EIO);
+ }
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
- rxr->latency = IXL_AVE_LATENCY;
+ status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (status) {
+ device_printf(pf->dev, "configure_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
+ return (EIO);
}
+
+ return (0);
}
+/**
+ * ixl_shutdown_hmc - Shutdown LAN Host Memory Cache
+ * @pf: PF structure
+ *
+ * Shut down the LAN Host Memory Cache if it was configured.
+ *
+ */
+void
+ixl_shutdown_hmc(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ enum i40e_status_code status;
+
+ /* HMC not configured, no need to shut down */
+ if (hw->hmc.hmc_obj == NULL)
+ return;
+
+ status = i40e_shutdown_lan_hmc(hw);
+ if (status)
+ device_printf(pf->dev,
+ "Shutdown LAN HMC failed with code %s\n",
+ i40e_stat_str(hw, status));
+}
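
The two helpers are meant to be used as a pair, and the hmc_obj NULL check makes the shutdown side idempotent, so teardown paths such as ixl_prepare_for_reset() can call it without tracking whether setup ever ran. A sketch of the intended call pattern, assuming an attach-style error flow (not taken verbatim from this commit):

	/* attach / rebuild path */
	error = ixl_setup_hmc(pf);
	if (error)
		goto err;	/* nothing to unwind; HMC was not configured */

	/* ... queue, switch, and filter configuration ... */

	/* detach / prepare-for-reset path: safe even if setup never ran */
	ixl_shutdown_hmc(pf);
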
/*
* Write PF ITR values to queue ITR registers.
*/
@@ -213,6 +315,11 @@ ixl_get_hw_capabilities(struct ixl_pf *pf)
bool again = TRUE;
u16 needed;
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ hw->func_caps.iwarp = 0;
+ return (0);
+ }
+
len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
retry:
if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
@@ -247,10 +354,8 @@ retry:
/* Determine functions to use for driver I2C accesses */
switch (pf->i2c_access_method) {
- case 0: {
- if (hw->mac.type == I40E_MAC_XL710 &&
- hw->aq.api_maj_ver == 1 &&
- hw->aq.api_min_ver >= 7) {
+ case IXL_I2C_ACCESS_METHOD_BEST_AVAILABLE: {
+ if (hw->flags & I40E_HW_FLAG_AQ_PHY_ACCESS_CAPABLE) {
pf->read_i2c_byte = ixl_read_i2c_byte_aq;
pf->write_i2c_byte = ixl_write_i2c_byte_aq;
} else {
@@ -259,15 +364,15 @@ retry:
}
break;
}
- case 3:
+ case IXL_I2C_ACCESS_METHOD_AQ:
pf->read_i2c_byte = ixl_read_i2c_byte_aq;
pf->write_i2c_byte = ixl_write_i2c_byte_aq;
break;
- case 2:
+ case IXL_I2C_ACCESS_METHOD_REGISTER_I2CCMD:
pf->read_i2c_byte = ixl_read_i2c_byte_reg;
pf->write_i2c_byte = ixl_write_i2c_byte_reg;
break;
- case 1:
+ case IXL_I2C_ACCESS_METHOD_BIT_BANG_I2CPARAMS:
pf->read_i2c_byte = ixl_read_i2c_byte_bb;
pf->write_i2c_byte = ixl_write_i2c_byte_bb;
break;
@@ -344,299 +449,6 @@ err_out:
return (status);
}
-int
-ixl_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- u32 reg;
- int error = 0;
-
- // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
- i40e_clear_hw(hw);
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "init: PF reset failure\n");
- error = EIO;
- goto err_out;
- }
-
- error = i40e_init_adminq(hw);
- if (error) {
- device_printf(dev, "init: Admin queue init failure;"
- " status code %d\n", error);
- error = EIO;
- goto err_out;
- }
-
- i40e_clear_pxe_mode(hw);
-
-#if 0
- error = ixl_get_hw_capabilities(pf);
- if (error) {
- device_printf(dev, "init: Error retrieving HW capabilities;"
- " status code %d\n", error);
- goto err_out;
- }
-
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init: LAN HMC init failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "init: LAN HMC config failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- // XXX: possible fix for panic, but our failure recovery is still broken
- error = ixl_switch_config(pf);
- if (error) {
- device_printf(dev, "init: ixl_switch_config() failed: %d\n",
- error);
- goto err_out;
- }
-
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
- NULL);
- if (error) {
- device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_set_fc(hw, &set_fc_err_mask, true);
- if (error) {
- device_printf(dev, "init: setting link flow control failed; retcode %d,"
- " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
- goto err_out;
- }
-
- // XXX: (Rebuild VSIs?)
-
- /* Firmware delay workaround */
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "init: link restart failed, aq_err %d\n",
- hw->aq.asq_last_status);
- goto err_out;
- }
- }
-
-
- /* Re-enable admin queue interrupt */
- if (pf->msix > 1) {
- ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
- }
-
-err_out:
- return (error);
-#endif
- ixl_rebuild_hw_structs_after_reset(pf);
-
- /* The PF reset should have cleared any critical errors */
- atomic_clear_32(&pf->state, IXL_PF_STATE_PF_CRIT_ERR);
- atomic_clear_32(&pf->state, IXL_PF_STATE_PF_RESET_REQ);
-
- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
- reg |= IXL_ICR0_CRIT_ERR_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- err_out:
- return (error);
-}
-
-/*
- * TODO: Make sure this properly handles admin queue / single rx queue intr
- */
-int
-ixl_intr(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_rx_queue *que = vsi->rx_queues;
- u32 icr0;
-
- // pf->admin_irq++
- ++que->irqs;
-
-// TODO: Check against proper field
-#if 0
- /* Clear PBA at start of ISR if using legacy interrupts */
- if (pf->msix == 0)
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT));
-#endif
-
- icr0 = rd32(hw, I40E_PFINT_ICR0);
-
-
-#ifdef PCI_IOV
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
- iflib_iov_intr_deferred(vsi->ctx);
-#endif
-
- // TODO!: Do the stuff that's done in ixl_msix_adminq here, too!
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK)
- iflib_admin_intr_deferred(vsi->ctx);
-
- // TODO: Is intr0 enabled somewhere else?
- ixl_enable_intr0(hw);
-
- if (icr0 & I40E_PFINT_ICR0_QUEUE_0_MASK)
- return (FILTER_SCHEDULE_THREAD);
- else
- return (FILTER_HANDLED);
-}
-
-
-/*********************************************************************
- *
- * MSI-X VSI Interrupt Service routine
- *
- **********************************************************************/
-int
-ixl_msix_que(void *arg)
-{
- struct ixl_rx_queue *rx_que = arg;
-
- ++rx_que->irqs;
-
- ixl_set_queue_rx_itr(rx_que);
- // ixl_set_queue_tx_itr(que);
-
- return (FILTER_SCHEDULE_THREAD);
-}
-
-
-/*********************************************************************
- *
- * MSI-X Admin Queue Interrupt Service routine
- *
- **********************************************************************/
-int
-ixl_msix_adminq(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- u32 reg, mask, rstat_reg;
- bool do_task = FALSE;
-
- DDPRINTF(dev, "begin");
-
- ++pf->admin_irq;
-
- reg = rd32(hw, I40E_PFINT_ICR0);
- /*
- * For masking off interrupt causes that need to be handled before
- * they can be re-enabled
- */
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
-
- /* Check on the cause */
- if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_ADMINQ_MASK;
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- atomic_set_32(&pf->state, IXL_PF_STATE_MDD_PENDING);
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_GRST_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_GRST_MASK;
- device_printf(dev, "Reset Requested!\n");
- rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
- rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- device_printf(dev, "Reset type: ");
- switch (rstat_reg) {
- /* These others might be handled similarly to an EMPR reset */
- case I40E_RESET_CORER:
- printf("CORER\n");
- break;
- case I40E_RESET_GLOBR:
- printf("GLOBR\n");
- break;
- case I40E_RESET_EMPR:
- printf("EMPR\n");
- break;
- default:
- printf("POR\n");
- break;
- }
- /* overload admin queue task to check reset progress */
- atomic_set_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
- do_task = TRUE;
- }
-
- /*
- * PE / PCI / ECC exceptions are all handled in the same way:
- * mask out these three causes, then request a PF reset
- *
- * TODO: I think at least ECC error requires a GLOBR, not PFR
- */
- if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK)
- device_printf(dev, "ECC Error detected!\n");
- if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK)
- device_printf(dev, "PCI Exception detected!\n");
- if (reg & I40E_PFINT_ICR0_PE_CRITERR_MASK)
- device_printf(dev, "Critical Protocol Engine Error detected!\n");
- /* Checks against the conditions above */
- if (reg & IXL_ICR0_CRIT_ERR_MASK) {
- mask &= ~IXL_ICR0_CRIT_ERR_MASK;
- atomic_set_32(&pf->state,
- IXL_PF_STATE_PF_RESET_REQ | IXL_PF_STATE_PF_CRIT_ERR);
- do_task = TRUE;
- }
-
- // TODO: Linux driver never re-enables this interrupt once it has been detected
- // Then what is supposed to happen? A PF reset? Should it never happen?
- // TODO: Parse out this error into something human readable
- if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- reg = rd32(hw, I40E_PFHMC_ERRORINFO);
- if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
- device_printf(dev, "HMC Error detected!\n");
- device_printf(dev, "INFO 0x%08x\n", reg);
- reg = rd32(hw, I40E_PFHMC_ERRORDATA);
- device_printf(dev, "DATA 0x%08x\n", reg);
- wr32(hw, I40E_PFHMC_ERRORINFO, 0);
- }
- }
-
-#ifdef PCI_IOV
- if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- iflib_iov_intr_deferred(pf->vsi.ctx);
- }
-#endif
-
- wr32(hw, I40E_PFINT_ICR0_ENA, mask);
- ixl_enable_intr0(hw);
-
- if (do_task)
- return (FILTER_SCHEDULE_THREAD);
- else
- return (FILTER_HANDLED);
-}
-
static u_int
ixl_add_maddr(void *arg, struct sockaddr_dl *sdl, u_int cnt)
{
@@ -704,6 +516,7 @@ ixl_del_multi(struct ixl_vsi *vsi)
IOCTL_DEBUGOUT("ixl_del_multi: begin");
+ /* Search for removed multicast addresses */
SLIST_FOREACH(f, &vsi->ftl, next)
if ((f->flags & IXL_FILTER_USED) &&
(f->flags & IXL_FILTER_MC) &&
@@ -714,7 +527,7 @@ ixl_del_multi(struct ixl_vsi *vsi)
if (mcnt > 0)
ixl_del_hw_filters(vsi, mcnt);
-
+
return (mcnt);
}
@@ -744,7 +557,7 @@ ixl_link_up_msg(struct ixl_pf *pf)
log(LOG_NOTICE, "%s: Link is up, %s Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
ifp->if_xname,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ ixl_link_speed_string(hw->phy.link_info.link_speed),
req_fec_string, neg_fec_string,
(hw->phy.link_info.an_info & I40E_AQ_AN_COMPLETED) ? "True" : "False",
(hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX &&
@@ -793,304 +606,78 @@ ixl_configure_intr0_msix(struct ixl_pf *pf)
wr32(hw, I40E_PFINT_STAT_CTL0, 0);
}
-/*
- * Configure queue interrupt cause registers in hardware.
- *
- * Linked list for each vector LNKLSTN(i) -> RQCTL(i) -> TQCTL(i) -> EOL
- */
-void
-ixl_configure_queue_intr_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
- u16 vector = 1;
-
- // TODO: See if max is really necessary
- for (int i = 0; i < max(vsi->num_rx_queues, vsi->num_tx_queues); i++, vector++) {
- /* Make sure interrupt is disabled */
- wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
- /* Set linked list head to point to corresponding RX queue
- * e.g. vector 1 (LNKLSTN register 0) points to queue pair 0's RX queue */
- reg = ((i << I40E_PFINT_LNKLSTN_FIRSTQ_INDX_SHIFT)
- & I40E_PFINT_LNKLSTN_FIRSTQ_INDX_MASK) |
- ((I40E_QUEUE_TYPE_RX << I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_SHIFT)
- & I40E_PFINT_LNKLSTN_FIRSTQ_TYPE_MASK);
- wr32(hw, I40E_PFINT_LNKLSTN(i), reg);
-
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(i), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(i), reg);
- }
-}
-
-/*
- * Configure for single interrupt vector operation
- */
-void
-ixl_configure_legacy(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
-
-// TODO: Fix
-#if 0
- /* Configure ITR */
- vsi->tx_itr_setting = pf->tx_itr;
- wr32(hw, I40E_PFINT_ITR0(IXL_TX_ITR),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
-
- vsi->rx_itr_setting = pf->rx_itr;
- wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
- /* XXX: Assuming only 1 queue in single interrupt mode */
-#endif
- vsi->rx_queues[0].rxr.itr = vsi->rx_itr_setting;
-
- /* Setup "other" causes */
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
- | I40E_PFINT_ICR0_ENA_GRST_MASK
- | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
- | I40E_PFINT_ICR0_ENA_VFLR_MASK
- | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
- ;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /* No ITR for non-queue interrupts */
- wr32(hw, I40E_PFINT_STAT_CTL0,
- IXL_ITR_NONE << I40E_PFINT_STAT_CTL0_OTHER_ITR_INDX_SHIFT);
-
- /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
- wr32(hw, I40E_PFINT_LNKLST0, 0);
-
- /* Associate the queue pair to the vector and enable the q int */
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
- | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
- | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
- | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-}
-
-void
-ixl_free_pci_resources(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = iflib_get_dev(vsi->ctx);
- struct ixl_rx_queue *rx_que = vsi->rx_queues;
-
- /* We may get here before stations are set up */
- if (rx_que == NULL)
- goto early;
-
- /*
- ** Release all MSI-X VSI resources:
- */
- iflib_irq_free(vsi->ctx, &vsi->irq);
-
- for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++)
- iflib_irq_free(vsi->ctx, &rx_que->que_irq);
-early:
- if (pf->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- rman_get_rid(pf->pci_mem), pf->pci_mem);
-}
-
void
-ixl_add_ifmedia(struct ixl_vsi *vsi, u64 phy_types)
+ixl_add_ifmedia(struct ifmedia *media, u64 phy_types)
{
/* Display supported media types */
if (phy_types & (I40E_CAP_PHY_TYPE_100BASE_TX))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_100_TX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_T))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_1000_T, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_SX))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_1000_SX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_LX))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_1000_LX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_XAUI) ||
phy_types & (I40E_CAP_PHY_TYPE_XFI) ||
phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_SR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_LR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_LR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_T))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_T, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_CR4_CU) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_AOC) ||
phy_types & (I40E_CAP_PHY_TYPE_XLAUI) ||
phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_1000BASE_KX))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_1000_KX, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1_CU)
|| phy_types & (I40E_CAP_PHY_TYPE_10GBASE_CR1))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_AOC, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_SFI))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_10GBASE_KR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_10G_KR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_20GBASE_KR2))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_XLPPI))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_KR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_KR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_25G_KR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_CR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_CR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_25G_CR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_SR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_SR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_25G_SR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_LR))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_LR, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_25G_LR, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_AOC))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
+ ifmedia_add(media, IFM_ETHER | IFM_25G_AOC, 0, NULL);
if (phy_types & (I40E_CAP_PHY_TYPE_25GBASE_ACC))
- ifmedia_add(vsi->media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
-}
-
-/*********************************************************************
- *
- * Setup networking device structure and register an interface.
- *
- **********************************************************************/
-int
-ixl_setup_interface(device_t dev, struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- if_ctx_t ctx = vsi->ctx;
- struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = iflib_get_ifp(ctx);
- struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code aq_error = 0;
-
- INIT_DBG_DEV(dev, "begin");
-
- vsi->shared->isc_max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
-
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, TRUE, &abilities, NULL);
- /* May need delay to detect fiber correctly */
- if (aq_error == I40E_ERR_UNKNOWN_PHY) {
- /* TODO: Maybe just retry this in a task... */
- i40e_msec_delay(200);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
- TRUE, &abilities, NULL);
- }
- if (aq_error) {
- if (aq_error == I40E_ERR_UNKNOWN_PHY)
- device_printf(dev, "Unknown PHY type detected!\n");
- else
- device_printf(dev,
- "Error getting supported media types, err %d,"
- " AQ error %d\n", aq_error, hw->aq.asq_last_status);
- } else {
- pf->supported_speeds = abilities.link_speed;
-#if __FreeBSD_version >= 1100000
- if_setbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
-#else
- if_initbaudrate(ifp, ixl_max_aq_speed_to_value(pf->supported_speeds));
-#endif
-
- ixl_add_ifmedia(vsi, hw->phy.phy_types);
- }
-
- /* Use autoselect media by default */
- ifmedia_add(vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(vsi->media, IFM_ETHER | IFM_AUTO);
-
- return (0);
-}
-
-/*
- * Input: bitmap of enum i40e_aq_link_speed
- */
-u64
-ixl_max_aq_speed_to_value(u8 link_speeds)
-{
- if (link_speeds & I40E_LINK_SPEED_40GB)
- return IF_Gbps(40);
- if (link_speeds & I40E_LINK_SPEED_25GB)
- return IF_Gbps(25);
- if (link_speeds & I40E_LINK_SPEED_20GB)
- return IF_Gbps(20);
- if (link_speeds & I40E_LINK_SPEED_10GB)
- return IF_Gbps(10);
- if (link_speeds & I40E_LINK_SPEED_1GB)
- return IF_Gbps(1);
- if (link_speeds & I40E_LINK_SPEED_100MB)
- return IF_Mbps(100);
- else
- /* Minimum supported link speed */
- return IF_Mbps(100);
-}
-
-/*
-** Run when the Admin Queue gets a link state change interrupt.
-*/
-void
-ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = iflib_get_dev(pf->vsi.ctx);
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-
- /* Request link status from adapter */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
- /* Print out message if an unqualified module is found */
- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
- (pf->advertised_speed) &&
- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
- (!(status->link_info & I40E_AQ_LINK_UP)))
- device_printf(dev, "Link failed because "
- "an unqualified module was detected!\n");
-
- /* OS link info is updated elsewhere */
+ ifmedia_add(media, IFM_ETHER | IFM_25G_ACC, 0, NULL);
}
/*********************************************************************
@@ -1142,197 +729,6 @@ ixl_switch_config(struct ixl_pf *pf)
return (ret);
}
-/*********************************************************************
- *
- * Initialize the VSI: this handles contexts, which means things
- * like the number of descriptors, buffer size,
- * plus we init the rings thru this function.
- *
- **********************************************************************/
-int
-ixl_initialize_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- if_softc_ctx_t scctx = iflib_get_softc_ctx(vsi->ctx);
- struct ixl_tx_queue *tx_que = vsi->tx_queues;
- struct ixl_rx_queue *rx_que = vsi->rx_queues;
- device_t dev = iflib_get_dev(vsi->ctx);
- struct i40e_hw *hw = vsi->hw;
- struct i40e_vsi_context ctxt;
- int tc_queues;
- int err = 0;
-
- memset(&ctxt, 0, sizeof(ctxt));
- ctxt.seid = vsi->seid;
- if (pf->veb_seid != 0)
- ctxt.uplink_seid = pf->veb_seid;
- ctxt.pf_num = hw->pf_id;
- err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
- " aq_error %d\n", err, hw->aq.asq_last_status);
- return (err);
- }
- ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
- "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
- "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
- "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
- ctxt.uplink_seid, ctxt.vsi_number,
- ctxt.vsis_allocated, ctxt.vsis_unallocated,
- ctxt.flags, ctxt.pf_num, ctxt.vf_num,
- ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
- /*
- ** Set the queue and traffic class bits
- ** - when multiple traffic classes are supported
- ** this will need to be more robust.
- */
- ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
- ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
- /* In contig mode, que_mapping[0] is first queue index used by this VSI */
- ctxt.info.queue_mapping[0] = 0;
- /*
- * This VSI will only use traffic class 0; start traffic class 0's
- * queue allocation at queue 0, and assign it 2^tc_queues queues (though
- * the driver may not use all of them).
- */
- tc_queues = fls(pf->qtag.num_allocated) - 1;
- ctxt.info.tc_mapping[0] = ((pf->qtag.first_qidx << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
- & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
- ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
-
- /* Set VLAN receive stripping mode */
- ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
- ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
- if (if_getcapenable(vsi->ifp) & IFCAP_VLAN_HWTAGGING)
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- else
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-
-#ifdef IXL_IW
- /* Set TCP Enable for iWARP capable VSI */
- if (ixl_enable_iwarp && pf->iw_enabled) {
- ctxt.info.valid_sections |=
- htole16(I40E_AQ_VSI_PROP_QUEUE_OPT_VALID);
- ctxt.info.queueing_opt_flags |= I40E_AQ_VSI_QUE_OPT_TCP_ENA;
- }
-#endif
- /* Save VSI number and info for use later */
- vsi->vsi_num = ctxt.vsi_number;
- bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
-
- /* Reset VSI statistics */
- ixl_vsi_reset_stats(vsi);
- vsi->hw_filters_add = 0;
- vsi->hw_filters_del = 0;
-
- ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
-
- err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
- " aq_error %d\n", err, hw->aq.asq_last_status);
- return (err);
- }
-
- for (int i = 0; i < vsi->num_tx_queues; i++, tx_que++) {
- struct tx_ring *txr = &tx_que->txr;
- struct i40e_hmc_obj_txq tctx;
- u32 txctl;
-
- /* Setup the HMC TX Context */
- bzero(&tctx, sizeof(tctx));
- tctx.new_context = 1;
- tctx.base = (txr->tx_paddr/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = scctx->isc_ntxd[0];
- tctx.fc_ena = 0; /* Disable FCoE */
- /*
-	 * This value needs to be pulled from the VSI that this queue
- * is assigned to. Index into array is traffic class.
- */
- tctx.rdylist = vsi->info.qs_handle[0];
- /*
- * Set these to enable Head Writeback
- * - Address is last entry in TX ring (reserved for HWB index)
- * Leave these as 0 for Descriptor Writeback
- */
- if (vsi->enable_head_writeback) {
- tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->tx_paddr +
- (scctx->isc_ntxd[0] * sizeof(struct i40e_tx_desc));
- } else {
- tctx.head_wb_ena = 0;
- tctx.head_wb_addr = 0;
- }
- tctx.rdylist_act = 0;
- err = i40e_clear_lan_tx_queue_context(hw, i);
- if (err) {
- device_printf(dev, "Unable to clear TX context\n");
- break;
- }
- err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
- if (err) {
- device_printf(dev, "Unable to set TX context\n");
- break;
- }
- /* Associate the ring with this PF */
- txctl = I40E_QTX_CTL_PF_QUEUE;
- txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
- wr32(hw, I40E_QTX_CTL(i), txctl);
- ixl_flush(hw);
-
- /* Do ring (re)init */
- ixl_init_tx_ring(vsi, tx_que);
- }
- for (int i = 0; i < vsi->num_rx_queues; i++, rx_que++) {
- struct rx_ring *rxr = &rx_que->rxr;
- struct i40e_hmc_obj_rxq rctx;
-
- /* Next setup the HMC RX Context */
- rxr->mbuf_sz = iflib_get_rx_mbuf_sz(vsi->ctx);
-
- u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
-
- /* Set up an RX context for the HMC */
- memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
- rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
- /* ignore header split for now */
- rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
- rctx.rxmax = (scctx->isc_max_frame_size < max_rxmax) ?
- scctx->isc_max_frame_size : max_rxmax;
- rctx.dtype = 0;
- rctx.dsize = 1; /* do 32byte descriptors */
- rctx.hsplit_0 = 0; /* no header split */
- rctx.base = (rxr->rx_paddr/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = scctx->isc_nrxd[0];
- rctx.tphrdesc_ena = 1;
- rctx.tphwdesc_ena = 1;
- rctx.tphdata_ena = 0; /* Header Split related */
- rctx.tphhead_ena = 0; /* Header Split related */
- rctx.lrxqthresh = 1; /* Interrupt at <64 desc avail */
- rctx.crcstrip = 1;
- rctx.l2tsel = 1;
- rctx.showiv = 1; /* Strip inner VLAN header */
- rctx.fc_ena = 0; /* Disable FCoE */
- rctx.prefena = 1; /* Prefetch descriptors */
-
- err = i40e_clear_lan_rx_queue_context(hw, i);
- if (err) {
- device_printf(dev,
- "Unable to clear RX context %d\n", i);
- break;
- }
- err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
- if (err) {
- device_printf(dev, "Unable to set RX context %d\n", i);
- break;
- }
- wr32(vsi->hw, I40E_QRX_TAIL(i), 0);
- }
- return (err);
-}
-
void
ixl_free_mac_filters(struct ixl_vsi *vsi)
{
@@ -1343,201 +739,29 @@ ixl_free_mac_filters(struct ixl_vsi *vsi)
SLIST_REMOVE_HEAD(&vsi->ftl, next);
free(f, M_DEVBUF);
}
-}
-
-/*
-** Provide an update to the queue RX
-** interrupt moderation value.
-*/
-void
-ixl_set_queue_rx_itr(struct ixl_rx_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = vsi->hw;
- struct rx_ring *rxr = &que->rxr;
- u16 rx_itr;
- u16 rx_latency = 0;
- int rx_bytes;
-
- /* Idle, do nothing */
- if (rxr->bytes == 0)
- return;
-
- if (pf->dynamic_rx_itr) {
- rx_bytes = rxr->bytes/rxr->itr;
- rx_itr = rxr->itr;
-
- /* Adjust latency range */
- switch (rxr->latency) {
- case IXL_LOW_LATENCY:
- if (rx_bytes > 10) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (rx_bytes > 20) {
- rx_latency = IXL_BULK_LATENCY;
- rx_itr = IXL_ITR_8K;
- } else if (rx_bytes <= 10) {
- rx_latency = IXL_LOW_LATENCY;
- rx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (rx_bytes <= 20) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- rxr->latency = rx_latency;
- if (rx_itr != rxr->itr) {
- /* do an exponential smoothing */
- rx_itr = (10 * rx_itr * rxr->itr) /
- ((9 * rx_itr) + rxr->itr);
- rxr->itr = min(rx_itr, IXL_MAX_ITR);
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- rxr->me), rxr->itr);
- }
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->rx_itr_setting = pf->rx_itr;
- /* Update the hardware if needed */
- if (rxr->itr != vsi->rx_itr_setting) {
- rxr->itr = vsi->rx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- rxr->me), rxr->itr);
- }
- }
- rxr->bytes = 0;
- rxr->packets = 0;
+ vsi->num_hw_filters = 0;
}
-
-/*
-** Provide an update to the queue TX
-** interrupt moderation value.
-*/
void
-ixl_set_queue_tx_itr(struct ixl_tx_queue *que)
+ixl_vsi_add_sysctls(struct ixl_vsi * vsi, const char * sysctl_name, bool queues_sysctls)
{
- struct ixl_vsi *vsi = que->vsi;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- u16 tx_itr;
- u16 tx_latency = 0;
- int tx_bytes;
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+ struct sysctl_oid_list *vsi_list;
+ tree = device_get_sysctl_tree(vsi->dev);
+ child = SYSCTL_CHILDREN(tree);
+ vsi->vsi_node = SYSCTL_ADD_NODE(&vsi->sysctl_ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
- /* Idle, do nothing */
- if (txr->bytes == 0)
- return;
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+ ixl_add_sysctls_eth_stats(&vsi->sysctl_ctx, vsi_list, &vsi->eth_stats);
- if (pf->dynamic_tx_itr) {
- tx_bytes = txr->bytes/txr->itr;
- tx_itr = txr->itr;
-
- switch (txr->latency) {
- case IXL_LOW_LATENCY:
- if (tx_bytes > 10) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (tx_bytes > 20) {
- tx_latency = IXL_BULK_LATENCY;
- tx_itr = IXL_ITR_8K;
- } else if (tx_bytes <= 10) {
- tx_latency = IXL_LOW_LATENCY;
- tx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (tx_bytes <= 20) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- txr->latency = tx_latency;
-
- if (tx_itr != txr->itr) {
- /* do an exponential smoothing */
- tx_itr = (10 * tx_itr * txr->itr) /
- ((9 * tx_itr) + txr->itr);
- txr->itr = min(tx_itr, IXL_MAX_ITR);
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- txr->me), txr->itr);
- }
-
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->tx_itr_setting = pf->tx_itr;
- /* Update the hardware if needed */
- if (txr->itr != vsi->tx_itr_setting) {
- txr->itr = vsi->tx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- txr->me), txr->itr);
- }
- }
- txr->bytes = 0;
- txr->packets = 0;
- return;
+ if (queues_sysctls)
+ ixl_vsi_add_queues_stats(vsi, &vsi->sysctl_ctx);
}
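
Both queue ITR routines removed above converge on a new interrupt throttling interval with the same exponential smoothing step, new = (10 * target * current) / (9 * target + current). A worked example with illustrative values (not real IXL_ITR_* register settings):

	/*
	 * current = 100, target = 20:
	 *   new = (10 * 20 * 100) / ((9 * 20) + 100)
	 *       = 20000 / 280
	 *       = 71  (integer division)
	 * One step moves most of the way toward the target while still
	 * damping oscillation between latency ranges.
	 */
	static u16
	smoothed_itr(u16 target, u16 current)
	{
		return ((10 * target * current) / ((9 * target) + current));
	}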
-#ifdef IXL_DEBUG
-/**
- * ixl_sysctl_qtx_tail_handler
- * Retrieves I40E_QTX_TAIL value from hardware
- * for a sysctl.
- */
-int
-ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_tx_queue *tx_que;
- int error;
- u32 val;
-
- tx_que = ((struct ixl_tx_queue *)oidp->oid_arg1);
- if (!tx_que) return 0;
-
- val = rd32(tx_que->vsi->hw, tx_que->txr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-
-/**
- * ixl_sysctl_qrx_tail_handler
- * Retrieves I40E_QRX_TAIL value from hardware
- * for a sysctl.
- */
-int
-ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_rx_queue *rx_que;
- int error;
- u32 val;
-
- rx_que = ((struct ixl_rx_queue *)oidp->oid_arg1);
- if (!rx_que) return 0;
-
- val = rd32(rx_que->vsi->hw, rx_que->rxr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-#endif
-
/*
* Used to set the Tx ITR value for all of the PF LAN VSI's queues.
* Writes to the ITR registers immediately.
@@ -1607,29 +831,6 @@ ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
}
void
-ixl_add_hw_stats(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = iflib_get_dev(vsi->ctx);
- struct i40e_hw_port_stats *pf_stats = &pf->stats;
-
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
-
- /* Driver statistics */
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, "admin_irq",
- CTLFLAG_RD, &pf->admin_irq,
- "Admin Queue IRQs received");
-
- ixl_add_vsi_sysctls(dev, vsi, ctx, "pf");
-
- ixl_add_queues_sysctls(dev, vsi);
-
- ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
-}
-
-void
ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
struct sysctl_oid_list *child,
struct i40e_hw_port_stats *stats)
@@ -1760,49 +961,6 @@ ixl_set_rss_pctypes(struct ixl_pf *pf)
}
-void
-ixl_set_rss_hlut(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = iflib_get_dev(vsi->ctx);
- int i, que_id;
- int lut_entry_width;
- u32 lut = 0;
- enum i40e_status_code status;
-
- lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
-
- /* Populate the LUT with max no. of queues in round robin fashion */
- u8 hlut_buf[512];
- for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
-#ifdef RSS
- /*
- * Fetch the RSS bucket id for the given indirection entry.
- * Cap it at the number of configured buckets (which is
- * num_queues.)
- */
- que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % vsi->num_rx_queues;
-#else
- que_id = i % vsi->num_rx_queues;
-#endif
- lut = (que_id & ((0x1 << lut_entry_width) - 1));
- hlut_buf[i] = lut;
- }
-
- if (hw->mac.type == I40E_MAC_X722) {
- status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
- if (status)
- device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
- i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
- } else {
- for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
- wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
- ixl_flush(hw);
- }
-}
-
/*
** Setup the PF's RSS parameters.
*/
@@ -1815,41 +973,6 @@ ixl_config_rss(struct ixl_pf *pf)
}
/*
-** This routine updates VLAN filters; called by init,
-** it scans the filter table and then updates the hw
-** after a soft reset.
-*/
-void
-ixl_setup_vlan_filters(struct ixl_vsi *vsi)
-{
- struct ixl_mac_filter *f;
- int cnt = 0, flags;
-
- if (vsi->num_vlans == 0)
- return;
- /*
- ** Scan the filter list for vlan entries,
- ** mark them for addition and then call
- ** for the AQ update.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags & IXL_FILTER_VLAN) {
- f->flags |=
- (IXL_FILTER_ADD |
- IXL_FILTER_USED);
- cnt++;
- }
- }
- if (cnt == 0) {
- printf("setup vlan: no filters found!\n");
- return;
- }
- flags = IXL_FILTER_VLAN;
- flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- ixl_add_hw_filters(vsi, flags, cnt);
-}
-
-/*
 * In some firmware versions there is a default MAC/VLAN filter
 * configured which interferes with filters managed by the driver.
* Make sure it's removed.
@@ -1877,29 +1000,40 @@ ixl_del_default_hw_filters(struct ixl_vsi *vsi)
** Initialize filter list and add filters that the hardware
** needs to know about.
**
-** Requires VSI's filter list & seid to be set before calling.
+** Requires VSI's seid to be set before calling.
*/
void
ixl_init_filters(struct ixl_vsi *vsi)
{
struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ ixl_dbg_filter(pf, "%s: start\n", __func__);
+
/* Initialize mac filter list for VSI */
SLIST_INIT(&vsi->ftl);
+ vsi->num_hw_filters = 0;
/* Receive broadcast Ethernet frames */
i40e_aq_set_vsi_broadcast(&pf->hw, vsi->seid, TRUE, NULL);
+ if (IXL_VSI_IS_VF(vsi))
+ return;
+
ixl_del_default_hw_filters(vsi);
ixl_add_filter(vsi, vsi->hw->mac.addr, IXL_VLAN_ANY);
+
/*
* Prevent Tx flow control frames from being sent out by
* non-firmware transmitters.
* This affects every VSI in the PF.
*/
+#ifndef IXL_DEBUG_FC
+ i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
+#else
if (pf->enable_tx_fc_filter)
i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
+#endif
}
/*
@@ -1940,11 +1074,12 @@ ixl_add_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
struct ixl_pf *pf;
device_t dev;
- DEBUGOUT("ixl_add_filter: begin");
-
pf = vsi->back;
dev = pf->dev;
+ ixl_dbg_filter(pf, "ixl_add_filter: " MAC_FORMAT ", vlan %4d\n",
+ MAC_FORMAT_ARGS(macaddr), vlan);
+
/* Does one already exist */
f = ixl_find_filter(vsi, macaddr, vlan);
if (f != NULL)
@@ -1981,6 +1116,10 @@ ixl_del_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
{
struct ixl_mac_filter *f;
+ ixl_dbg_filter((struct ixl_pf *)vsi->back,
+ "ixl_del_filter: " MAC_FORMAT ", vlan %4d\n",
+ MAC_FORMAT_ARGS(macaddr), vlan);
+
f = ixl_find_filter(vsi, macaddr, vlan);
if (f == NULL)
return;
@@ -2012,7 +1151,7 @@ ixl_find_filter(struct ixl_vsi *vsi, const u8 *macaddr, s16 vlan)
&& (f->vlan == vlan)) {
return (f);
}
- }
+ }
return (NULL);
}
@@ -2037,6 +1176,9 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
dev = vsi->dev;
hw = &pf->hw;
+ ixl_dbg_filter(pf,
+ "ixl_add_hw_filters: flags: %d cnt: %d\n", flags, cnt);
+
if (cnt < 1) {
ixl_dbg_info(pf, "ixl_add_hw_filters: cnt == 0\n");
return;
@@ -2084,7 +1226,7 @@ ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
"error %s\n", i40e_stat_str(hw, status),
i40e_aq_str(hw, hw->aq.asq_last_status));
else
- vsi->hw_filters_add += j;
+ vsi->num_hw_filters += j;
}
free(a, M_DEVBUF);
return;
@@ -2110,6 +1252,8 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
hw = &pf->hw;
dev = vsi->dev;
+ ixl_dbg_filter(pf, "%s: start, cnt: %d\n", __func__, cnt);
+
d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
M_DEVBUF, M_NOWAIT | M_ZERO);
if (d == NULL) {
@@ -2146,14 +1290,16 @@ ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
int sc = 0;
for (int i = 0; i < j; i++)
sc += (!d[i].error_code);
- vsi->hw_filters_del += sc;
+ vsi->num_hw_filters -= sc;
device_printf(dev,
"Failed to remove %d/%d filters, error %s\n",
j - sc, j, i40e_aq_str(hw, hw->aq.asq_last_status));
} else
- vsi->hw_filters_del += j;
+ vsi->num_hw_filters -= j;
}
free(d, M_DEVBUF);
+
+ ixl_dbg_filter(pf, "%s: end\n", __func__);
return;
}
@@ -2240,22 +1386,6 @@ ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
return (error);
}
-/* For PF VSI only */
-int
-ixl_enable_rings(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- int error = 0;
-
- for (int i = 0; i < vsi->num_tx_queues; i++)
- error = ixl_enable_tx_ring(pf, &pf->qtag, i);
-
- for (int i = 0; i < vsi->num_rx_queues; i++)
- error = ixl_enable_rx_ring(pf, &pf->qtag, i);
-
- return (error);
-}
-
/*
* Returns error on first ring that is detected hung.
*/
@@ -2269,6 +1399,10 @@ ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Disabling PF TX ring %4d / VSI TX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
i40e_usec_delay(500);
@@ -2304,6 +1438,10 @@ ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Disabling PF RX ring %4d / VSI RX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
@@ -2336,20 +1474,6 @@ ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
return (error);
}
-int
-ixl_disable_rings(struct ixl_pf *pf, struct ixl_vsi *vsi, struct ixl_pf_qtag *qtag)
-{
- int error = 0;
-
- for (int i = 0; i < vsi->num_tx_queues; i++)
- error = ixl_disable_tx_ring(pf, qtag, i);
-
- for (int i = 0; i < vsi->num_rx_queues; i++)
- error = ixl_disable_rx_ring(pf, qtag, i);
-
- return (error);
-}
-
static void
ixl_handle_tx_mdd_event(struct ixl_pf *pf)
{
@@ -2526,29 +1650,6 @@ ixl_handle_mdd_event(struct ixl_pf *pf)
}
void
-ixl_enable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
-
- if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- ixl_enable_queue(hw, que->rxr.me);
- } else
- ixl_enable_intr0(hw);
-}
-
-void
-ixl_disable_rings_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_rx_queue *que = vsi->rx_queues;
-
- for (int i = 0; i < vsi->num_rx_queues; i++, que++)
- ixl_disable_queue(hw, que->rxr.me);
-}
-
-void
ixl_enable_intr0(struct i40e_hw *hw)
{
u32 reg;
@@ -2591,6 +1692,35 @@ ixl_disable_queue(struct i40e_hw *hw, int id)
}
void
+ixl_handle_empr_reset(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
+
+ ixl_prepare_for_reset(pf, is_up);
+ /*
+ * i40e_pf_reset checks the type of reset and acts
+	 * accordingly. If an EMP or Core reset was performed,
+	 * a PF reset is not necessary and sometimes fails.
+ */
+ ixl_pf_reset(pf);
+
+ if (!IXL_PF_IN_RECOVERY_MODE(pf) &&
+ ixl_get_fw_mode(pf) == IXL_FW_MODE_RECOVERY) {
+ atomic_set_32(&pf->state, IXL_PF_STATE_RECOVERY_MODE);
+ device_printf(pf->dev,
+ "Firmware recovery mode detected. Limiting functionality. Refer to Intel(R) Ethernet Adapters and Devices User Guide for details on firmware recovery mode.\n");
+ pf->link_up = FALSE;
+ ixl_update_link_status(pf);
+ }
+
+ ixl_rebuild_hw_structs_after_reset(pf, is_up);
+
+ atomic_clear_32(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
+}
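
A sketch of the expected call site, inferred from the GRST handling in the removed ixl_msix_adminq above (an assumption, not the literal admin-task code): the interrupt handler sets IXL_PF_STATE_ADAPTER_RESETTING and defers, and the admin task then finishes the reset outside interrupt context:

	/* Hypothetical admin-task fragment */
	if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING)
		ixl_handle_empr_reset(pf);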
+
+void
ixl_update_stats_counters(struct ixl_pf *pf)
{
struct i40e_hw *hw = &pf->hw;
@@ -2774,163 +1904,6 @@ ixl_update_stats_counters(struct ixl_pf *pf)
}
}
-int
-ixl_prepare_for_reset(struct ixl_pf *pf, bool is_up)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error = 0;
-
- error = i40e_shutdown_lan_hmc(hw);
- if (error)
- device_printf(dev,
- "Shutdown LAN HMC failed with code %d\n", error);
-
- ixl_disable_intr0(hw);
-
- error = i40e_shutdown_adminq(hw);
- if (error)
- device_printf(dev,
- "Shutdown Admin queue failed with code %d\n", error);
-
- ixl_pf_qmgr_release(&pf->qmgr, &pf->qtag);
- return (error);
-}
-
-int
-ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- int error = 0;
-
- device_printf(dev, "Rebuilding driver state...\n");
-
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "PF reset failure %s\n",
- i40e_stat_str(hw, error));
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- /* Setup */
- error = i40e_init_adminq(hw);
- if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
- device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
- error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- i40e_clear_pxe_mode(hw);
-
- error = ixl_get_hw_capabilities(pf);
- if (error) {
- device_printf(dev, "ixl_get_hw_capabilities failed: %d\n", error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- /* reserve a contiguous allocation for the PF's VSI */
- error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_tx_queues, &pf->qtag);
- if (error) {
- device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
- error);
- /* TODO: error handling */
- }
-
- error = ixl_switch_config(pf);
- if (error) {
- device_printf(dev, "ixl_rebuild_hw_structs_after_reset: ixl_switch_config() failed: %d\n",
- error);
- error = EIO;
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
- NULL);
- if (error) {
- device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
- error = EIO;
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- u8 set_fc_err_mask;
- error = i40e_set_fc(hw, &set_fc_err_mask, true);
- if (error) {
- device_printf(dev, "init: setting link flow control failed; retcode %d,"
- " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
- error = EIO;
- goto ixl_rebuild_hw_structs_after_reset_err;
- }
-
- /* Remove default filters reinstalled by FW on reset */
- ixl_del_default_hw_filters(vsi);
-
- /* Determine link state */
- if (ixl_attach_get_link_status(pf)) {
- error = EINVAL;
- /* TODO: error handling */
- }
-
- i40e_aq_set_dcb_parameters(hw, TRUE, NULL);
- ixl_get_fw_lldp_status(pf);
-
- /* Keep admin queue interrupts active while driver is loaded */
- if (vsi->shared->isc_intr == IFLIB_INTR_MSIX) {
- ixl_configure_intr0_msix(pf);
- ixl_enable_intr0(hw);
- }
-
- device_printf(dev, "Rebuilding driver state done.\n");
- return (0);
-
-ixl_rebuild_hw_structs_after_reset_err:
- device_printf(dev, "Reload the driver to recover\n");
- return (error);
-}
-
-void
-ixl_handle_empr_reset(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- struct i40e_hw *hw = &pf->hw;
- bool is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
- int count = 0;
- u32 reg;
-
- ixl_prepare_for_reset(pf, is_up);
-
- /* Typically finishes within 3-4 seconds */
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg)
- i40e_msec_delay(100);
- else
- break;
- }
- ixl_dbg(pf, IXL_DBG_INFO,
- "Reset wait count: %d\n", count);
-
- ixl_rebuild_hw_structs_after_reset(pf);
-
- atomic_clear_int(&pf->state, IXL_PF_STATE_ADAPTER_RESETTING);
-}
-
/**
* Update VSI-specific ethernet statistics counters.
**/
@@ -2941,12 +1914,10 @@ ixl_update_eth_stats(struct ixl_vsi *vsi)
struct i40e_hw *hw = &pf->hw;
struct i40e_eth_stats *es;
struct i40e_eth_stats *oes;
- struct i40e_hw_port_stats *nsd;
u16 stat_idx = vsi->info.stat_counter_idx;
es = &vsi->eth_stats;
oes = &vsi->eth_stats_offsets;
- nsd = &pf->stats;
/* Gather up the stats that the hw collects */
ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
@@ -3107,6 +2078,67 @@ ixl_stat_update32(struct i40e_hw *hw, u32 reg,
*stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
}
+/**
+ * Add the subset of device sysctls that is safe to use in recovery mode
+ */
+void
+ixl_add_sysctls_recovery_mode(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ struct sysctl_oid *debug_node;
+ struct sysctl_oid_list *debug_list;
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_version",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, pf, 0,
+ ixl_sysctl_show_fw, "A", "Firmware version");
+
+ /* Add sysctls meant to print debug information, but don't list them
+ * in "sysctl -a" output. */
+ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP | CTLFLAG_MPSAFE, NULL,
+ "Debug Sysctls");
+ debug_list = SYSCTL_CHILDREN(debug_node);
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
+ &pf->hw.debug_mask, 0, "Shared code debug message level");
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "core_debug_mask", CTLFLAG_RW,
+ &pf->dbg_mask, 0, "Non-shared code debug message level");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "dump_debug_data",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_dump_debug_data, "A", "Dump Debug Data from FW");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "do_pf_reset",
+ CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_do_pf_reset, "I", "Tell HW to initiate a PF reset");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "do_core_reset",
+ CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_do_core_reset, "I", "Tell HW to initiate a CORE reset");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "do_global_reset",
+ CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "queue_interrupt_table",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
+ pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
+}
+
void
ixl_add_device_sysctls(struct ixl_pf *pf)
{
@@ -3295,12 +2327,6 @@ ixl_add_device_sysctls(struct ixl_pf *pf)
pf, 0, ixl_sysctl_do_global_reset, "I", "Tell HW to initiate a GLOBAL reset");
SYSCTL_ADD_PROC(ctx, debug_list,
- OID_AUTO, "do_emp_reset",
- CTLTYPE_INT | CTLFLAG_WR | CTLFLAG_NEEDGIANT,
- pf, 0, ixl_sysctl_do_emp_reset, "I",
- "(This doesn't work) Tell HW to initiate a EMP (entire firmware) reset");
-
- SYSCTL_ADD_PROC(ctx, debug_list,
OID_AUTO, "queue_interrupt_table",
CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT,
pf, 0, ixl_sysctl_queue_interrupt_table, "A", "View MSI-X indices for TX/RX queues");
@@ -3338,54 +2364,10 @@ ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
return sysctl_handle_int(oidp, NULL, queues, req);
}
-/*
-** Set flow control using sysctl:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
-*/
-int
-ixl_sysctl_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_fc, error = 0;
- enum i40e_status_code aq_error = 0;
- u8 fc_aq_err = 0;
-
- /* Get request */
- requested_fc = pf->fc;
- error = sysctl_handle_int(oidp, &requested_fc, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- if (requested_fc < 0 || requested_fc > 3) {
- device_printf(dev,
- "Invalid fc mode; valid modes are 0 through 3\n");
- return (EINVAL);
- }
-
- /* Set fc ability for port */
- hw->fc.requested_mode = requested_fc;
- aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
- if (aq_error) {
- device_printf(dev,
- "%s: Error setting new fc mode %d; fc_err %#x\n",
- __func__, aq_error, fc_aq_err);
- return (EIO);
- }
- pf->fc = requested_fc;
-
- return (0);
-}
-
-char *
-ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
+static const char *
+ixl_link_speed_string(enum i40e_aq_link_speed link_speed)
{
- int index;
-
- char *speeds[] = {
+ const char * link_speed_str[] = {
"Unknown",
"100 Mbps",
"1 Gbps",
@@ -3394,6 +2376,7 @@ ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
"20 Gbps",
"25 Gbps",
};
+ int index;
switch (link_speed) {
case I40E_LINK_SPEED_100MB:
@@ -3420,7 +2403,7 @@ ixl_aq_speed_to_str(enum i40e_aq_link_speed link_speed)
break;
}
- return speeds[index];
+ return (link_speed_str[index]);
}
int
@@ -3433,8 +2416,10 @@ ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
ixl_update_link_status(pf);
error = sysctl_handle_string(oidp,
- ixl_aq_speed_to_str(hw->phy.link_info.link_speed),
+ __DECONST(void *,
+ ixl_link_speed_string(hw->phy.link_info.link_speed)),
8, req);
+
return (error);
}
@@ -3445,7 +2430,8 @@ ixl_sysctl_current_speed(SYSCTL_HANDLER_ARGS)
static u8
ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
{
- static u16 speedmap[6] = {
+#define SPEED_MAP_SIZE 6
+ static u16 speedmap[SPEED_MAP_SIZE] = {
(I40E_LINK_SPEED_100MB | (0x1 << 8)),
(I40E_LINK_SPEED_1GB | (0x2 << 8)),
(I40E_LINK_SPEED_10GB | (0x4 << 8)),
@@ -3455,7 +2441,7 @@ ixl_convert_sysctl_aq_link_speed(u8 speeds, bool to_aq)
};
u8 retval = 0;
- for (int i = 0; i < 6; i++) {
+ for (int i = 0; i < SPEED_MAP_SIZE; i++) {
if (to_aq)
retval |= (speeds & (speedmap[i] >> 8)) ? (speedmap[i] & 0xff) : 0;
else
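
Each speedmap entry in the hunk above packs two encodings into one u16: the AQ link-speed bit in the low byte and the matching sysctl flag bit in the high byte, so a single table converts in either direction depending on to_aq. A sketch of the round trip, assuming I40E_LINK_SPEED_10GB is the AQ bit paired with sysctl flag 0x4 as in the table:

	u8 sysctl_bits = 0x4;	/* user requested 10G */
	u8 aq_bits = ixl_convert_sysctl_aq_link_speed(sysctl_bits, true);
	/* aq_bits == I40E_LINK_SPEED_10GB */
	u8 back = ixl_convert_sysctl_aq_link_speed(aq_bits, false);
	/* back == 0x4 */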
@@ -3498,7 +2484,8 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
config.eee_capability = abilities.eee_capability;
config.eeer = abilities.eeer_val;
config.low_power_ctrl = abilities.d3_lpan;
- config.fec_config = (abilities.fec_cfg_curr_mod_ext_info & 0x1e);
+ config.fec_config = abilities.fec_cfg_curr_mod_ext_info
+ & I40E_AQ_PHY_FEC_CONFIG_MASK;
/* Do aq command & restart link */
aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
@@ -3514,7 +2501,7 @@ ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds, bool from_aq)
}
/*
-** Supported link speedsL
+** Supported link speeds
** Flags:
** 0x1 - 100 Mb
** 0x2 - 1G
@@ -3558,6 +2545,11 @@ ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
error = sysctl_handle_int(oidp, &requested_ls, 0, req);
if ((error) || (req->newptr == NULL))
return (error);
+ if (IXL_PF_IN_RECOVERY_MODE(pf)) {
+ device_printf(dev, "Interface is currently in FW recovery mode. "
+ "Setting advertise speed not supported\n");
+ return (EINVAL);
+ }
/* Error out if bits outside of possible flag range are set */
if ((requested_ls & ~((u8)0x3F)) != 0) {
@@ -3586,6 +2578,29 @@ ixl_sysctl_set_advertise(SYSCTL_HANDLER_ARGS)
}
/*
+ * Input: bitmap of enum i40e_aq_link_speed
+ */
+u64
+ixl_max_aq_speed_to_value(u8 link_speeds)
+{
+ if (link_speeds & I40E_LINK_SPEED_40GB)
+ return IF_Gbps(40);
+ if (link_speeds & I40E_LINK_SPEED_25GB)
+ return IF_Gbps(25);
+ if (link_speeds & I40E_LINK_SPEED_20GB)
+ return IF_Gbps(20);
+ if (link_speeds & I40E_LINK_SPEED_10GB)
+ return IF_Gbps(10);
+ if (link_speeds & I40E_LINK_SPEED_1GB)
+ return IF_Gbps(1);
+ if (link_speeds & I40E_LINK_SPEED_100MB)
+ return IF_Mbps(100);
+ else
+ /* Minimum supported link speed */
+ return IF_Mbps(100);
+}
+
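
Because ixl_max_aq_speed_to_value() resolves to the highest advertised speed, callers can feed an entire capability bitmap straight into the interface baudrate helper; a small sketch:

	/* A bitmap advertising both 1G and 10G resolves to the 10G rate */
	u8 speeds = I40E_LINK_SPEED_10GB | I40E_LINK_SPEED_1GB;
	if_setbaudrate(ifp, ixl_max_aq_speed_to_value(speeds));	/* IF_Gbps(10) */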
+/*
** Get the width and transaction speed of
** the bus this adapter is plugged into.
*/
@@ -3659,34 +2674,29 @@ ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
void
ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
{
- if ((nvma->command == I40E_NVM_READ) &&
- ((nvma->config & 0xFF) == 0xF) &&
- (((nvma->config & 0xF00) >> 8) == 0xF) &&
- (nvma->offset == 0) &&
- (nvma->data_size == 1)) {
- // device_printf(dev, "- Get Driver Status Command\n");
- }
- else if (nvma->command == I40E_NVM_READ) {
-
- }
- else {
- switch (nvma->command) {
- case 0xB:
- device_printf(dev, "- command: I40E_NVM_READ\n");
- break;
- case 0xC:
- device_printf(dev, "- command: I40E_NVM_WRITE\n");
- break;
- default:
- device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
- break;
+ u8 nvma_ptr = nvma->config & 0xFF;
+ u8 nvma_flags = (nvma->config & 0xF00) >> 8;
+ const char * cmd_str;
+
+ switch (nvma->command) {
+ case I40E_NVM_READ:
+ if (nvma_ptr == 0xF && nvma_flags == 0xF &&
+ nvma->offset == 0 && nvma->data_size == 1) {
+ device_printf(dev, "NVMUPD: Get Driver Status Command\n");
+ return;
}
-
- device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
- device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
- device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
- device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
+ cmd_str = "READ ";
+ break;
+ case I40E_NVM_WRITE:
+ cmd_str = "WRITE";
+ break;
+ default:
+ device_printf(dev, "NVMUPD: unknown command: 0x%08x\n", nvma->command);
+ return;
}
+ device_printf(dev,
+ "NVMUPD: cmd: %s ptr: 0x%02x flags: 0x%01x offset: 0x%08x data_s: 0x%08x\n",
+ cmd_str, nvma_ptr, nvma_flags, nvma->offset, nvma->data_size);
}
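
The rewritten printer above splits nvma->config into a pointer byte and a flags nibble before branching on the command, and short-circuits the driver-status poll (a READ with ptr 0xF, flags 0xF, offset 0, size 1) that the NVM update tool issues repeatedly. A decode example for an arbitrary config value:

	/* config == 0x20F decodes as: */
	u8 nvma_ptr   = 0x20F & 0xFF;		/* 0x0F */
	u8 nvma_flags = (0x20F & 0xF00) >> 8;	/* 0x02 */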
int
@@ -3716,12 +2726,12 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
return (EINVAL);
}
- nvma = malloc(ifd_len, M_DEVBUF, M_WAITOK);
+ nvma = malloc(ifd_len, M_IXL, M_WAITOK);
err = copyin(ifd->ifd_data, nvma, ifd_len);
if (err) {
device_printf(dev, "%s: Cannot get request from user space\n",
__func__);
- free(nvma, M_DEVBUF);
+ free(nvma, M_IXL);
return (err);
}
@@ -3738,14 +2748,18 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
}
if (pf->state & IXL_PF_STATE_ADAPTER_RESETTING) {
- free(nvma, M_DEVBUF);
+ device_printf(dev,
+ "%s: timeout waiting for EMP reset to finish\n",
+ __func__);
+ free(nvma, M_IXL);
return (-EBUSY);
}
if (nvma->data_size < 1 || nvma->data_size > 4096) {
- device_printf(dev, "%s: invalid request, data size not in supported range\n",
+ device_printf(dev,
+ "%s: invalid request, data size not in supported range\n",
__func__);
- free(nvma, M_DEVBUF);
+ free(nvma, M_IXL);
return (EINVAL);
}
@@ -3759,12 +2773,12 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
if (ifd_len < exp_len) {
ifd_len = exp_len;
- nvma = realloc(nvma, ifd_len, M_DEVBUF, M_WAITOK);
+ nvma = realloc(nvma, ifd_len, M_IXL, M_WAITOK);
err = copyin(ifd->ifd_data, nvma, ifd_len);
if (err) {
device_printf(dev, "%s: Cannot get request from user space\n",
__func__);
- free(nvma, M_DEVBUF);
+ free(nvma, M_IXL);
return (err);
}
}
@@ -3775,7 +2789,7 @@ ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
// IXL_PF_UNLOCK(pf);
err = copyout(nvma, ifd->ifd_data, ifd_len);
- free(nvma, M_DEVBUF);
+ free(nvma, M_IXL);
if (err) {
device_printf(dev, "%s: Cannot return data to user space\n",
__func__);
@@ -3994,23 +3008,35 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
for (int i = 0; i < 32; i++)
if ((1 << i) & abilities.phy_type)
sbuf_printf(buf, "%s,", ixl_phy_type_string(i, false));
- sbuf_printf(buf, ">\n");
+ sbuf_printf(buf, ">");
}
- sbuf_printf(buf, "PHY Ext : %02x",
+ sbuf_printf(buf, "\nPHY Ext : %02x",
abilities.phy_type_ext);
if (abilities.phy_type_ext != 0) {
sbuf_printf(buf, "<");
for (int i = 0; i < 4; i++)
if ((1 << i) & abilities.phy_type_ext)
- sbuf_printf(buf, "%s,", ixl_phy_type_string(i, true));
+ sbuf_printf(buf, "%s,",
+ ixl_phy_type_string(i, true));
sbuf_printf(buf, ">");
}
- sbuf_printf(buf, "\n");
- sbuf_printf(buf,
- "Speed : %02x\n"
+ sbuf_printf(buf, "\nSpeed : %02x", abilities.link_speed);
+ if (abilities.link_speed != 0) {
+ u8 link_speed;
+ sbuf_printf(buf, " <");
+ for (int i = 0; i < 8; i++) {
+ link_speed = (1 << i) & abilities.link_speed;
+ if (link_speed)
+ sbuf_printf(buf, "%s, ",
+ ixl_link_speed_string(link_speed));
+ }
+ sbuf_printf(buf, ">");
+ }
+
+ sbuf_printf(buf, "\n"
"Abilities: %02x\n"
"EEE cap : %04x\n"
"EEER reg : %08x\n"
@@ -4020,7 +3046,6 @@ ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
"ModType E: %01x\n"
"FEC Cfg : %02x\n"
"Ext CC : %02x",
- abilities.link_speed,
abilities.abilities, abilities.eee_capability,
abilities.eeer_val, abilities.d3_lpan,
abilities.phy_id[0], abilities.phy_id[1],
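The speed decode walks the capability byte bit by bit and hands each set bit, still as a mask, to the lookup, so ixl_link_speed_string() is keyed by mask value rather than bit index. A standalone sketch (speed_label() is a stand-in; the case values follow the I40E_LINK_SPEED_* bit assignments and are illustrative):

#include <stdint.h>
#include <stdio.h>

/* Stand-in for ixl_link_speed_string(): one label per speed mask. */
static const char *
speed_label(uint8_t bit)
{
	switch (bit) {
	case 0x02: return ("100 Mbps");
	case 0x04: return ("1 Gbps");
	case 0x08: return ("10 Gbps");
	default:   return ("other");
	}
}

void
print_speeds(uint8_t speeds)
{
	for (int i = 0; i < 8; i++) {
		uint8_t bit = speeds & (1 << i);

		if (bit != 0)
			printf("%s, ", speed_label(bit));
	}
}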
@@ -4051,7 +3076,7 @@ ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
if (!buf) {
- device_printf(dev, "Could not allocate sbuf for output.\n");
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
return (ENOMEM);
}
@@ -4126,11 +3151,10 @@ ixl_res_alloc_cmp(const void *a, const void *b)
/*
* Longest string length: 25
*/
-char *
+const char *
ixl_switch_res_type_string(u8 type)
{
- // TODO: This should be changed to static const
- char * ixl_switch_res_type_strings[0x14] = {
+ static const char * ixl_switch_res_type_strings[IXL_SW_RES_SIZE] = {
"VEB",
"VSI",
"Perfect Match MAC address",
@@ -4153,7 +3177,7 @@ ixl_switch_res_type_string(u8 type)
"Tunneling Port"
};
- if (type < 0x14)
+ if (type < IXL_SW_RES_SIZE)
return ixl_switch_res_type_strings[type];
else
return "(Reserved)";
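Replacing the magic 0x14 with IXL_SW_RES_SIZE ties the bounds check to the array's declared size, and static const moves the table out of the stack frame into read-only data. Condensed to its shape (most entries elided; the function name is a placeholder):

const char *
name_of(u8 type)
{
	/* One constant sizes the table and bounds the lookup,
	 * so the two cannot drift apart. */
	static const char *names[IXL_SW_RES_SIZE] = {
		"VEB",
		"VSI",
		/* ... remaining entries ... */
	};

	return (type < IXL_SW_RES_SIZE) ? names[type] : "(Reserved)";
}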
@@ -4222,29 +3246,52 @@ ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
return (error);
}
+enum ixl_sw_seid_offset {
+ IXL_SW_SEID_EMP = 1,
+ IXL_SW_SEID_MAC_START = 2,
+ IXL_SW_SEID_MAC_END = 5,
+ IXL_SW_SEID_PF_START = 16,
+ IXL_SW_SEID_PF_END = 31,
+ IXL_SW_SEID_VF_START = 32,
+ IXL_SW_SEID_VF_END = 159,
+};
+
/*
-** Caller must init and delete sbuf; this function will clear and
-** finish it for caller.
-*/
-char *
-ixl_switch_element_string(struct sbuf *s,
- struct i40e_aqc_switch_config_element_resp *element)
+ * Caller must init and delete sbuf; this function will clear and
+ * finish it for caller.
+ *
+ * Note: The SEID argument only applies to elements defined by FW at
+ * power-on; these include the EMP, Ports, PFs and VFs.
+ */
+static char *
+ixl_switch_element_string(struct sbuf *s, u8 element_type, u16 seid)
{
sbuf_clear(s);
- switch (element->element_type) {
- case I40E_AQ_SW_ELEM_TYPE_MAC:
- sbuf_printf(s, "MAC %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_PF:
- sbuf_printf(s, "PF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_VF:
- sbuf_printf(s, "VF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_EMP:
+ /* If SEID is in certain ranges, then we can infer the
+ * mapping of SEID to switch element.
+ */
+ if (seid == IXL_SW_SEID_EMP) {
sbuf_cat(s, "EMP");
- break;
+ goto out;
+ } else if (seid >= IXL_SW_SEID_MAC_START &&
+ seid <= IXL_SW_SEID_MAC_END) {
+ sbuf_printf(s, "MAC %2d",
+ seid - IXL_SW_SEID_MAC_START);
+ goto out;
+ } else if (seid >= IXL_SW_SEID_PF_START &&
+ seid <= IXL_SW_SEID_PF_END) {
+ sbuf_printf(s, "PF %3d",
+ seid - IXL_SW_SEID_PF_START);
+ goto out;
+ } else if (seid >= IXL_SW_SEID_VF_START &&
+ seid <= IXL_SW_SEID_VF_END) {
+ sbuf_printf(s, "VF %3d",
+ seid - IXL_SW_SEID_VF_START);
+ goto out;
+ }
+
+ switch (element_type) {
case I40E_AQ_SW_ELEM_TYPE_BMC:
sbuf_cat(s, "BMC");
break;
@@ -4258,18 +3305,29 @@ ixl_switch_element_string(struct sbuf *s,
sbuf_cat(s, "PA");
break;
case I40E_AQ_SW_ELEM_TYPE_VSI:
- sbuf_printf(s, "VSI %3d", element->element_info);
+ sbuf_printf(s, "VSI");
break;
default:
sbuf_cat(s, "?");
break;
}
+out:
sbuf_finish(s);
return sbuf_data(s);
}
static int
+ixl_sw_cfg_elem_seid_cmp(const void *a, const void *b)
+{
+ const struct i40e_aqc_switch_config_element_resp *one, *two;
+ one = (const struct i40e_aqc_switch_config_element_resp *)a;
+ two = (const struct i40e_aqc_switch_config_element_resp *)b;
+
+ return ((int)one->seid - (int)two->seid);
+}
+
+static int
ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
{
struct ixl_pf *pf = (struct ixl_pf *)arg1;
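The enum above captures the SEID ranges firmware hands out at power-on, which is why a name can be inferred from the SEID alone and only out-of-range elements fall back to the element_type switch. A standalone sketch of the range test (function name is ours, ranges are from the enum):

#include <stdint.h>
#include <stdio.h>

void
print_seid_name(uint16_t seid)
{
	if (seid == 1)                      /* IXL_SW_SEID_EMP */
		printf("EMP\n");
	else if (seid >= 2 && seid <= 5)    /* MAC range */
		printf("MAC %d\n", seid - 2);
	else if (seid >= 16 && seid <= 31)  /* PF range */
		printf("PF %d\n", seid - 16);
	else if (seid >= 32 && seid <= 159) /* VF range */
		printf("VF %d\n", seid - 32);
	else
		printf("(decide by element_type)\n");
}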
@@ -4282,6 +3340,7 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
u16 next = 0;
u8 aq_buf[I40E_AQ_LARGE_BUF];
+ struct i40e_aqc_switch_config_element_resp *elem;
struct i40e_aqc_get_switch_config_resp *sw_config;
sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
@@ -4312,28 +3371,41 @@ ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
+ /* Sort entries by SEID for display */
+ qsort(sw_config->element, sw_config->header.num_reported,
+ sizeof(struct i40e_aqc_switch_config_element_resp),
+ &ixl_sw_cfg_elem_seid_cmp);
+
sbuf_cat(buf, "\n");
/* Assuming <= 255 elements in switch */
sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
/* Exclude:
- ** Revision -- all elements are revision 1 for now
- */
+ * Revision -- all elements are revision 1 for now
+ */
sbuf_printf(buf,
- "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
- " | | | (uplink)\n");
+ "SEID ( Name ) | Up ( Name ) | Down ( Name ) | Conn Type\n"
+ " | | | (uplink)\n");
for (int i = 0; i < sw_config->header.num_reported; i++) {
+ elem = &sw_config->element[i];
+
// "%4d (%8s) | %8s %8s %#8x",
- sbuf_printf(buf, "%4d", sw_config->element[i].seid);
+ sbuf_printf(buf, "%4d", elem->seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+ elem->element_type, elem->seid));
+ sbuf_cat(buf, " | ");
+ sbuf_printf(buf, "%4d", elem->uplink_seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+ 0, elem->uplink_seid));
+ sbuf_cat(buf, " | ");
+ sbuf_printf(buf, "%4d", elem->downlink_seid);
sbuf_cat(buf, " ");
sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
- &sw_config->element[i]));
+ 0, elem->downlink_seid));
sbuf_cat(buf, " | ");
- sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
+ sbuf_printf(buf, "%8d", elem->connection_type);
if (i < sw_config->header.num_reported - 1)
sbuf_cat(buf, "\n");
}
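The sort uses the subtract comparator; because each seid is only 16 bits wide, the promotion to int makes the difference exact (for instance (int)40 - (int)200 == -160, so SEID 40 sorts first), which would not be safe for full-width unsigned values. The call mirrors the hunk above:

/* Ascending by SEID; int promotion keeps the subtraction exact
 * for the whole u16 range. */
qsort(sw_config->element, sw_config->header.num_reported,
    sizeof(struct i40e_aqc_switch_config_element_resp),
    &ixl_sw_cfg_elem_seid_cmp);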
@@ -4367,7 +3439,7 @@ ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
return (ENOMEM);
}
- bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
+ bzero(&key_data, sizeof(key_data));
sbuf_cat(buf, "\n");
if (hw->mac.type == I40E_MAC_X722) {
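Zeroing the whole structure rather than just the standard key matters on X722, where the key bytes beyond standard_rss_key are also displayed; a stale stack value there would leak into the sysctl output. The idiom, as a sketch (struct name as used by the shared i40e code):

struct i40e_aqc_get_set_rss_key_data key_data;

/* Zero every field -- including the extended key bytes used on
 * X722 -- not only standard_rss_key. */
bzero(&key_data, sizeof(key_data));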
@@ -4534,8 +3606,13 @@ ixl_sysctl_fw_link_management(SYSCTL_HANDLER_ARGS)
}
/*
- * Read some diagnostic data from an SFP module
- * Bytes 96-99, 102-105 from device address 0xA2
+ * Read some diagnostic data from a (Q)SFP+ module
+ *
+ *             SFP A2   QSFP Lower Page
+ * Temperature 96-97    22-23
+ * Vcc         98-99    26-27
+ * TX power    102-103  34-35..40-41
+ * RX power    104-105  50-51..56-57
*/
static int
ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
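As a worked example of the table above: on an SFP module the temperature is a signed 16-bit quantity at bytes 96-97 of address 0xA2, with an LSB of 1/256 degC under internal calibration (per SFF-8472, not this driver). A fragment in the style of the handler below, using its read_i2c_byte hook:

u8 msb, lsb;
int16_t raw;

pf->read_i2c_byte(pf, 96, 0xA2, &msb);    /* temperature MSB */
pf->read_i2c_byte(pf, 97, 0xA2, &lsb);    /* temperature LSB */
/* SFF-8472 internal calibration: signed, 1/256 degC per LSB,
 * so 0x1A80 decodes to 26.5 degC */
raw = (int16_t)(((u16)msb << 8) | lsb);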
@@ -4546,31 +3623,67 @@ ixl_sysctl_read_i2c_diag_data(SYSCTL_HANDLER_ARGS)
int error = 0;
u8 output;
+ if (req->oldptr == NULL) {
+ /* Sizing probe: advertise the expected output length to sysctl(8) */
+ error = SYSCTL_OUT(req, 0, 128);
+ return (0);
+ }
+
error = pf->read_i2c_byte(pf, 0, 0xA0, &output);
if (error) {
device_printf(dev, "Error reading from i2c\n");
return (error);
}
- if (output != 0x3) {
- device_printf(dev, "Module is not SFP/SFP+/SFP28 (%02X)\n", output);
- return (EIO);
- }
- pf->read_i2c_byte(pf, 92, 0xA0, &output);
- if (!(output & 0x60)) {
- device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
- return (EIO);
- }
+ /* 0x3 for SFP; 0xD/0x11 for QSFP+/QSFP28 */
+ if (output == 0x3) {
+ /*
+ * Check for:
+ * - internally calibrated data
+ * - implemented diagnostic monitoring
+ */
+ pf->read_i2c_byte(pf, 92, 0xA0, &output);
+ if (!(output & 0x60)) {
+ device_printf(dev, "Module doesn't support diagnostics: %02X\n", output);
+ return (0);
+ }
- sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- for (u8 offset = 96; offset < 100; offset++) {
- pf->read_i2c_byte(pf, offset, 0xA2, &output);
- sbuf_printf(sbuf, "%02X ", output);
- }
- for (u8 offset = 102; offset < 106; offset++) {
- pf->read_i2c_byte(pf, offset, 0xA2, &output);
- sbuf_printf(sbuf, "%02X ", output);
+ for (u8 offset = 96; offset < 100; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA2, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 102; offset < 106; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA2, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ } else if (output == 0xD || output == 0x11) {
+ /*
+ * QSFP+ modules are always internally calibrated, and must indicate
+ * what types of diagnostic monitoring are implemented
+ */
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+
+ for (u8 offset = 22; offset < 24; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 26; offset < 28; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ /* Read the data from the first lane */
+ for (u8 offset = 34; offset < 36; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ for (u8 offset = 50; offset < 52; offset++) {
+ pf->read_i2c_byte(pf, offset, 0xA0, &output);
+ sbuf_printf(sbuf, "%02X ", output);
+ }
+ } else {
+ device_printf(dev, "Module is not SFP/SFP+/SFP28/QSFP+ (%02X)\n", output);
+ return (0);
}
sbuf_finish(sbuf);
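The identifier byte that drives this branch is offset 0 at address 0xA0, defined by SFF-8024; 0x03 covers SFP/SFP+/SFP28 and 0x0D/0x11 are QSFP+/QSFP28, as the comment above notes. The dispatch reduces to the following (helper names are placeholders, not driver functions):

switch (output) {               /* SFF-8024 identifier byte */
case 0x03:                      /* SFP / SFP+ / SFP28 */
	dump_sfp_a2_diagnostics();
	break;
case 0x0D:                      /* QSFP+ */
case 0x11:                      /* QSFP28 */
	dump_qsfp_lower_page_diagnostics();
	break;
default:
	/* module type not handled by this sysctl */
	break;
}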
@@ -4661,6 +3774,9 @@ ixl_get_fec_config(struct ixl_pf *pf, struct i40e_aq_get_phy_abilities_resp *abi
struct i40e_hw *hw = &pf->hw;
enum i40e_status_code status;
+ if (IXL_PF_IN_RECOVERY_MODE(pf))
+ return (EIO);
+
status = i40e_aq_get_phy_capabilities(hw,
FALSE, FALSE, abilities, NULL);
if (status) {
@@ -4820,7 +3936,7 @@ ixl_sysctl_dump_debug_data(SYSCTL_HANDLER_ARGS)
u8 *final_buff;
/* This amount is only necessary if reading the entire cluster into memory */
#define IXL_FINAL_BUFF_SIZE (1280 * 1024)
- final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_WAITOK);
+ final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
if (final_buff == NULL) {
device_printf(dev, "Could not allocate memory for output.\n");
goto out;
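A note on the allocation flag: with M_WAITOK, malloc(9) sleeps until it succeeds and never returns NULL, so the NULL check after this call was previously dead code. M_NOWAIT makes the check real, and a sysctl handler can simply fail the request under memory pressure:

final_buff = malloc(IXL_FINAL_BUFF_SIZE, M_DEVBUF, M_NOWAIT);
if (final_buff == NULL) {
	/* reachable only with M_NOWAIT; report the failure and bail */
	device_printf(dev, "Could not allocate memory for output.\n");
	goto out;
}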
@@ -4893,78 +4009,98 @@ out:
}
static int
-ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
+ixl_start_fw_lldp(struct ixl_pf *pf)
{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error = 0;
- int state, new_state;
enum i40e_status_code status;
- state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &new_state, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- /* Already in requested state */
- if (new_state == state)
- return (error);
-
- if (new_state == 0) {
- if (hw->mac.type == I40E_MAC_X722 || hw->func_caps.npar_enable != 0) {
- device_printf(dev, "Disabling FW LLDP agent is not supported on this device\n");
+ status = i40e_aq_start_lldp(hw, false, NULL);
+ if (status != I40E_SUCCESS) {
+ switch (hw->aq.asq_last_status) {
+ case I40E_AQ_RC_EEXIST:
+ device_printf(pf->dev,
+ "FW LLDP agent is already running\n");
+ break;
+ case I40E_AQ_RC_EPERM:
+ device_printf(pf->dev,
+ "Device configuration forbids SW from starting "
+ "the LLDP agent. Set the \"LLDP Agent\" UEFI HII "
+ "attribute to \"Enabled\" to use this sysctl\n");
+ return (EINVAL);
+ default:
+ device_printf(pf->dev,
+ "Starting FW LLDP agent failed: error: %s, %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return (EINVAL);
}
+ }
- if (pf->hw.aq.api_maj_ver < 1 ||
- (pf->hw.aq.api_maj_ver == 1 &&
- pf->hw.aq.api_min_ver < 7)) {
- device_printf(dev, "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
+ atomic_clear_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ return (0);
+}
+
+static int
+ixl_stop_fw_lldp(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+
+ if (hw->func_caps.npar_enable != 0) {
+ device_printf(dev,
+ "Disabling FW LLDP agent is not supported on this device\n");
+ return (EINVAL);
+ }
+
+ if ((hw->flags & I40E_HW_FLAG_FW_LLDP_STOPPABLE) == 0) {
+ device_printf(dev,
+ "Disabling FW LLDP agent is not supported in this FW version. Please update FW to enable this feature.\n");
+ return (EINVAL);
+ }
+
+ status = i40e_aq_stop_lldp(hw, true, false, NULL);
+ if (status != I40E_SUCCESS) {
+ if (hw->aq.asq_last_status != I40E_AQ_RC_EPERM) {
+ device_printf(dev,
+ "Disabling FW LLDP agent failed: error: %s, %s\n",
+ i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
return (EINVAL);
}
- i40e_aq_stop_lldp(&pf->hw, true, NULL);
- i40e_aq_set_dcb_parameters(&pf->hw, true, NULL);
- atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
- } else {
- status = i40e_aq_start_lldp(&pf->hw, NULL);
- if (status != I40E_SUCCESS && hw->aq.asq_last_status == I40E_AQ_RC_EEXIST)
- device_printf(dev, "FW LLDP agent is already running\n");
- atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ device_printf(dev, "FW LLDP agent is already stopped\n");
}
+#ifndef EXTERNAL_RELEASE
+ /* Let the FW set default DCB configuration on link UP as described in DCR 307.1 */
+#endif
+ i40e_aq_set_dcb_parameters(hw, true, NULL);
+ atomic_set_32(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
return (0);
}
-/*
- * Get FW LLDP Agent status
- */
-int
-ixl_get_fw_lldp_status(struct ixl_pf *pf)
+static int
+ixl_sysctl_fw_lldp(SYSCTL_HANDLER_ARGS)
{
- enum i40e_status_code ret = I40E_SUCCESS;
- struct i40e_lldp_variables lldp_cfg;
- struct i40e_hw *hw = &pf->hw;
- u8 adminstatus = 0;
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int state, new_state, error = 0;
+
+ state = new_state = ((pf->state & IXL_PF_STATE_FW_LLDP_DISABLED) == 0);
- ret = i40e_read_lldp_cfg(hw, &lldp_cfg);
- if (ret)
- return ret;
+ /* Read in new mode */
+ error = sysctl_handle_int(oidp, &new_state, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
- /* Get the LLDP AdminStatus for the current port */
- adminstatus = lldp_cfg.adminstatus >> (hw->port * 4);
- adminstatus &= 0xf;
+ /* Already in requested state */
+ if (new_state == state)
+ return (error);
- /* Check if LLDP agent is disabled */
- if (!adminstatus) {
- device_printf(pf->dev, "FW LLDP agent is disabled for this PF.\n");
- atomic_set_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
- } else
- atomic_clear_int(&pf->state, IXL_PF_STATE_FW_LLDP_DISABLED);
+ if (new_state == 0)
+ return ixl_stop_fw_lldp(pf);
- return (0);
+ return ixl_start_fw_lldp(pf);
}
int
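The rewritten handler is the standard sysctl toggle idiom: publish the current state, let sysctl_handle_int(9) read in a replacement, bail out on read-only access or a no-op write, and only then dispatch. A condensed sketch (current_state/enable/disable are placeholders for the LLDP helpers above):

static int
toggle_sysctl(SYSCTL_HANDLER_ARGS)
{
	int state, new_state, error;

	state = new_state = current_state();
	error = sysctl_handle_int(oidp, &new_state, 0, req);
	if (error != 0 || req->newptr == NULL)
		return (error);         /* error, or read-only access */
	if (new_state == state)
		return (0);             /* already in requested state */
	return (new_state ? enable() : disable());
}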
@@ -5042,28 +4178,6 @@ ixl_sysctl_do_global_reset(SYSCTL_HANDLER_ARGS)
return (error);
}
-static int
-ixl_sysctl_do_emp_reset(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- int requested = 0, error = 0;
-
- /* Read in new mode */
- error = sysctl_handle_int(oidp, &requested, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
-
- /* TODO: Find out how to bypass this */
- if (!(rd32(hw, 0x000B818C) & 0x1)) {
- device_printf(pf->dev, "SW not allowed to initiate EMPR\n");
- error = EINVAL;
- } else
- wr32(hw, I40E_GLGEN_RTRIG, I40E_GLGEN_RTRIG_EMPFWR_MASK);
-
- return (error);
-}
-
/*
* Print out mapping of TX queue indexes and Rx queue indexes
* to MSI-X vectors.
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index c0040b914d1c..e589bb8392cd 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -892,12 +892,11 @@ ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
}
void
-ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
+ixl_vsi_add_queues_stats(struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx)
{
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
struct sysctl_oid_list *vsi_list, *queue_list;
struct sysctl_oid *queue_node;
- char queue_namebuf[32];
+ char queue_namebuf[IXL_QUEUE_NAME_LEN];
struct ixl_rx_queue *rx_que;
struct ixl_tx_queue *tx_que;
@@ -909,7 +908,7 @@ ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
/* Queue statistics */
for (int q = 0; q < vsi->num_rx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "rxq%02d", q);
+ snprintf(queue_namebuf, sizeof(queue_namebuf), "rxq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
NULL, "RX Queue #");
@@ -937,7 +936,7 @@ ixl_add_queues_sysctls(device_t dev, struct ixl_vsi *vsi)
}
for (int q = 0; q < vsi->num_tx_queues; q++) {
bzero(queue_namebuf, sizeof(queue_namebuf));
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "txq%02d", q);
+ snprintf(queue_namebuf, sizeof(queue_namebuf), "txq%02d", q);
queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
OID_AUTO, queue_namebuf, CTLFLAG_RD | CTLFLAG_MPSAFE,
NULL, "TX Queue #");