author     Josh Paetzel <jpaetzel@FreeBSD.org>  2016-09-22 22:51:11 +0000
committer  Josh Paetzel <jpaetzel@FreeBSD.org>  2016-09-22 22:51:11 +0000
commit     c2625e6e38faa983542b92fa753a2223f446654c (patch)
tree       af357094d65ae66a7f5e1cc4743dccfc54e7b523
parent     3673f7136a97374e1b992a5b6d83a99066d8d80a (diff)
Update oce to version 11.0.50.0
Submitted by: Venkat Duvvuru <venkatkumar.duvvuru@broadcom.com>
Notes: svn path=/head/; revision=306219
-rw-r--r--  sys/dev/oce/oce_hw.c      5
-rw-r--r--  sys/dev/oce/oce_hw.h      529
-rw-r--r--  sys/dev/oce/oce_if.c      1138
-rw-r--r--  sys/dev/oce/oce_if.h      122
-rw-r--r--  sys/dev/oce/oce_mbox.c    304
-rw-r--r--  sys/dev/oce/oce_queue.c   291
-rw-r--r--  sys/dev/oce/oce_sysctl.c  177
-rw-r--r--  sys/dev/oce/oce_user.h    121
8 files changed, 2188 insertions, 499 deletions
diff --git a/sys/dev/oce/oce_hw.c b/sys/dev/oce/oce_hw.c
index aad5795a97e5..72f3c556f85e 100644
--- a/sys/dev/oce/oce_hw.c
+++ b/sys/dev/oce/oce_hw.c
@@ -393,6 +393,11 @@ oce_create_nw_interface(POCE_SOFTC sc)
if (IS_SH(sc) || IS_XE201(sc))
capab_flags |= MBX_RX_IFACE_FLAGS_MULTICAST;
+ if (sc->enable_hwlro) {
+ capab_flags |= MBX_RX_IFACE_FLAGS_LRO;
+ capab_en_flags |= MBX_RX_IFACE_FLAGS_LRO;
+ }
+
/* enable capabilities controlled via driver startup parameters */
if (is_rss_enabled(sc))
capab_en_flags |= MBX_RX_IFACE_FLAGS_RSS;
diff --git a/sys/dev/oce/oce_hw.h b/sys/dev/oce/oce_hw.h
index 1ad3f7889e0b..edb029c1e282 100644
--- a/sys/dev/oce/oce_hw.h
+++ b/sys/dev/oce/oce_hw.h
@@ -111,6 +111,9 @@
#define PD_MPU_MBOX_DB 0x0160
#define PD_MQ_DB 0x0140
+#define DB_OFFSET 0xc0
+#define DB_LRO_RQ_ID_MASK 0x7FF
+
/* EQE completion types */
#define EQ_MINOR_CODE_COMPLETION 0x00
#define EQ_MINOR_CODE_OTHER 0x01
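DB_OFFSET and DB_LRO_RQ_ID_MASK describe the doorbell used to replenish a hardware-LRO receive queue: the RQ id sits in the low 11 bits and the number of buffers posted goes in bits 16 and up. A minimal sketch of the value that oce_alloc_rx_bufs() in oce_if.c later writes (rq_id and num_posted stand in for the driver's locals):

	/* LRO RQ doorbell: qid in bits 10:0, buffers posted in bits 31:16 */
	uint32_t val = (rq_id & DB_LRO_RQ_ID_MASK) | ((uint32_t)num_posted << 16);
	OCE_WRITE_REG32(sc, db, DB_OFFSET, val);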
@@ -180,6 +183,7 @@
#define ASYNC_EVENT_GRP5 0x5
#define ASYNC_EVENT_CODE_DEBUG 0x6
#define ASYNC_EVENT_PVID_STATE 0x3
+#define ASYNC_EVENT_OS2BMC 0x5
#define ASYNC_EVENT_DEBUG_QNQ 0x1
#define ASYNC_EVENT_CODE_SLIPORT 0x11
#define VLAN_VID_MASK 0x0FFF
@@ -722,6 +726,34 @@ struct oce_async_cqe_link_state {
} u0;
};
+/* OS2BMC async event */
+struct oce_async_evt_grp5_os2bmc {
+ union {
+ struct {
+ uint32_t lrn_enable:1;
+ uint32_t lrn_disable:1;
+ uint32_t mgmt_enable:1;
+ uint32_t mgmt_disable:1;
+ uint32_t rsvd0:12;
+ uint32_t vlan_tag:16;
+ uint32_t arp_filter:1;
+ uint32_t dhcp_client_filt:1;
+ uint32_t dhcp_server_filt:1;
+ uint32_t net_bios_filt:1;
+ uint32_t rsvd1:3;
+ uint32_t bcast_filt:1;
+ uint32_t ipv6_nbr_filt:1;
+ uint32_t ipv6_ra_filt:1;
+ uint32_t ipv6_ras_filt:1;
+ uint32_t rsvd2[4];
+ uint32_t mcast_filt:1;
+ uint32_t rsvd3:16;
+ uint32_t evt_tag;
+ uint32_t dword3;
+ } s;
+ uint32_t dword[4];
+ } u;
+};
/* PVID async event */
struct oce_async_event_grp5_pvid_state {
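Each filter bit of this event ends up as one bit of the driver's bmc_filt_mask; a condensed sketch of the decode that oce_async_grp5_osbmc_process() performs later in this commit:

	/* condensed from oce_async_grp5_osbmc_process() in oce_if.c below */
	sc->bmc_filt_mask  =  evt->u.s.arp_filter;             /* bit 0 */
	sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1); /* bit 1 */
	sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);       /* bit 4 */
	sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);       /* bit 8; the other filters fill bits 2-7 */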
@@ -1396,7 +1428,7 @@ typedef union oce_cq_ctx_u {
uint32_t dw5rsvd3:1;
uint32_t eventable:1;
/* dw6 */
- uint32_t eq_id:8;
+ uint32_t eq_id:16;
uint32_t dw6rsvd1:15;
uint32_t armed:1;
/* dw7 */
@@ -2403,8 +2435,8 @@ struct oce_nic_hdr_wqe {
uint32_t tcpcs:1;
uint32_t udpcs:1;
uint32_t ipcs:1;
- uint32_t rsvd3:1;
- uint32_t rsvd2:1;
+ uint32_t mgmt:1;
+ uint32_t lso6:1;
uint32_t forward:1;
uint32_t crc:1;
uint32_t event:1;
@@ -2426,8 +2458,8 @@ struct oce_nic_hdr_wqe {
uint32_t event:1;
uint32_t crc:1;
uint32_t forward:1;
- uint32_t rsvd2:1;
- uint32_t rsvd3:1;
+ uint32_t lso6:1;
+ uint32_t mgmt:1;
uint32_t ipcs:1;
uint32_t udpcs:1;
uint32_t tcpcs:1;
@@ -3010,6 +3042,53 @@ struct oce_rxf_stats_v0 {
uint32_t rsvd1[6];
};
+struct oce_port_rxf_stats_v2 {
+ uint32_t rsvd0[10];
+ uint32_t roce_bytes_received_lsd;
+ uint32_t roce_bytes_received_msd;
+ uint32_t rsvd1[5];
+ uint32_t roce_frames_received;
+ uint32_t rx_crc_errors;
+ uint32_t rx_alignment_symbol_errors;
+ uint32_t rx_pause_frames;
+ uint32_t rx_priority_pause_frames;
+ uint32_t rx_control_frames;
+ uint32_t rx_in_range_errors;
+ uint32_t rx_out_range_errors;
+ uint32_t rx_frame_too_long;
+ uint32_t rx_address_match_errors;
+ uint32_t rx_dropped_too_small;
+ uint32_t rx_dropped_too_short;
+ uint32_t rx_dropped_header_too_small;
+ uint32_t rx_dropped_tcp_length;
+ uint32_t rx_dropped_runt;
+ uint32_t rsvd2[10];
+ uint32_t rx_ip_checksum_errs;
+ uint32_t rx_tcp_checksum_errs;
+ uint32_t rx_udp_checksum_errs;
+ uint32_t rsvd3[7];
+ uint32_t rx_switched_unicast_packets;
+ uint32_t rx_switched_multicast_packets;
+ uint32_t rx_switched_broadcast_packets;
+ uint32_t rsvd4[3];
+ uint32_t tx_pauseframes;
+ uint32_t tx_priority_pauseframes;
+ uint32_t tx_controlframes;
+ uint32_t rsvd5[10];
+ uint32_t rxpp_fifo_overflow_drop;
+ uint32_t rx_input_fifo_overflow_drop;
+ uint32_t pmem_fifo_overflow_drop;
+ uint32_t jabber_events;
+ uint32_t rsvd6[3];
+ uint32_t rx_drops_payload_size;
+ uint32_t rx_drops_clipped_header;
+ uint32_t rx_drops_crc;
+ uint32_t roce_drops_payload_len;
+ uint32_t roce_drops_crc;
+ uint32_t rsvd7[19];
+};
+
+
struct oce_port_rxf_stats_v1 {
uint32_t rsvd0[12];
uint32_t rx_crc_errors;
@@ -3046,6 +3125,20 @@ struct oce_port_rxf_stats_v1 {
uint32_t rsvd5[3];
};
+struct oce_rxf_stats_v2 {
+ struct oce_port_rxf_stats_v2 port[4];
+ uint32_t rsvd0[2];
+ uint32_t rx_drops_no_pbuf;
+ uint32_t rx_drops_no_txpb;
+ uint32_t rx_drops_no_erx_descr;
+ uint32_t rx_drops_no_tpre_descr;
+ uint32_t rsvd1[6];
+ uint32_t rx_drops_too_many_frags;
+ uint32_t rx_drops_invalid_ring;
+ uint32_t forwarded_packets;
+ uint32_t rx_drops_mtu;
+ uint32_t rsvd2[35];
+};
struct oce_rxf_stats_v1 {
struct oce_port_rxf_stats_v1 port[4];
@@ -3062,6 +3155,11 @@ struct oce_rxf_stats_v1 {
uint32_t rsvd2[14];
};
+struct oce_erx_stats_v2 {
+ uint32_t rx_drops_no_fragments[136];
+ uint32_t rsvd[3];
+};
+
struct oce_erx_stats_v1 {
uint32_t rx_drops_no_fragments[68];
uint32_t rsvd[4];
@@ -3078,6 +3176,15 @@ struct oce_pmem_stats {
uint32_t rsvd[5];
};
+struct oce_hw_stats_v2 {
+ struct oce_rxf_stats_v2 rxf;
+ uint32_t rsvd0[OCE_TXP_SW_SZ];
+ struct oce_erx_stats_v2 erx;
+ struct oce_pmem_stats pmem;
+ uint32_t rsvd1[18];
+};
+
+
struct oce_hw_stats_v1 {
struct oce_rxf_stats_v1 rxf;
uint32_t rsvd0[OCE_TXP_SW_SZ];
@@ -3093,32 +3200,22 @@ struct oce_hw_stats_v0 {
struct oce_pmem_stats pmem;
};
-struct mbx_get_nic_stats_v0 {
- struct mbx_hdr hdr;
- union {
- struct {
- uint32_t rsvd0;
- } req;
-
- union {
- struct oce_hw_stats_v0 stats;
- } rsp;
- } params;
-};
-
-struct mbx_get_nic_stats {
- struct mbx_hdr hdr;
- union {
- struct {
- uint32_t rsvd0;
- } req;
-
- struct {
- struct oce_hw_stats_v1 stats;
- } rsp;
- } params;
-};
-
+#define MBX_GET_NIC_STATS(version) \
+ struct mbx_get_nic_stats_v##version { \
+ struct mbx_hdr hdr; \
+ union { \
+ struct { \
+ uint32_t rsvd0; \
+ } req; \
+ union { \
+ struct oce_hw_stats_v##version stats; \
+ } rsp; \
+ } params; \
+}
+
+MBX_GET_NIC_STATS(0);
+MBX_GET_NIC_STATS(1);
+MBX_GET_NIC_STATS(2);
/* [18(0x12)] NIC_GET_PPORT_STATS */
struct pport_stats {
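For reference, MBX_GET_NIC_STATS(2) expands to a structure of the same shape as the removed hand-written v0/v1 definitions:

	/* mechanical expansion of MBX_GET_NIC_STATS(2) */
	struct mbx_get_nic_stats_v2 {
		struct mbx_hdr hdr;
		union {
			struct {
				uint32_t rsvd0;
			} req;
			union {
				struct oce_hw_stats_v2 stats;
			} rsp;
		} params;
	};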
@@ -3728,3 +3825,373 @@ enum OCE_QUEUE_RX_STATS {
QUEUE_RX_BUFFER_ERRORS = 8,
QUEUE_RX_N_WORDS = 10
};
+
+/* HW LRO structures */
+struct mbx_nic_query_lro_capabilities {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+ uint32_t rsvd[6];
+ } req;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t lro_flags;
+ uint16_t lro_rq_cnt;
+ uint16_t plro_max_offload;
+ uint32_t rsvd[4];
+#else
+ uint32_t lro_flags;
+ uint16_t plro_max_offload;
+ uint16_t lro_rq_cnt;
+ uint32_t rsvd[4];
+#endif
+ } rsp;
+ } params;
+};
+
+struct mbx_nic_set_iface_lro_config {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t lro_flags;
+ uint32_t iface_id;
+ uint32_t max_clsc_byte_cnt;
+ uint32_t max_clsc_seg_cnt;
+ uint32_t max_clsc_usec_delay;
+ uint32_t min_clsc_frame_byte_cnt;
+ uint32_t rsvd[2];
+#else
+ uint32_t lro_flags;
+ uint32_t iface_id;
+ uint32_t max_clsc_byte_cnt;
+ uint32_t max_clsc_seg_cnt;
+ uint32_t max_clsc_usec_delay;
+ uint32_t min_clsc_frame_byte_cnt;
+ uint32_t rsvd[2];
+#endif
+ } req;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint32_t lro_flags;
+ uint32_t rsvd[7];
+#else
+ uint32_t lro_flags;
+ uint32_t rsvd[7];
+#endif
+ } rsp;
+ } params;
+};
+
+
+struct mbx_create_nic_rq_v2 {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint8_t num_pages;
+ uint8_t frag_size;
+ uint16_t cq_id;
+
+ uint32_t if_id;
+
+ uint16_t page_size;
+ uint16_t max_frame_size;
+
+ uint16_t rsvd;
+ uint16_t pd_id;
+
+ uint16_t rsvd1;
+ uint16_t rq_flags;
+
+ uint16_t hds_fixed_offset;
+ uint8_t hds_start;
+ uint8_t hds_frag;
+
+ uint16_t hds_backfill_size;
+ uint16_t hds_frag_size;
+
+ uint32_t rbq_id;
+
+ uint32_t rsvd2[8];
+
+ struct phys_addr pages[2];
+#else
+ uint16_t cq_id;
+ uint8_t frag_size;
+ uint8_t num_pages;
+
+ uint32_t if_id;
+
+ uint16_t max_frame_size;
+ uint16_t page_size;
+
+ uint16_t pd_id;
+ uint16_t rsvd;
+
+ uint16_t rq_flags;
+ uint16_t rsvd1;
+
+ uint8_t hds_frag;
+ uint8_t hds_start;
+ uint16_t hds_fixed_offset;
+
+ uint16_t hds_frag_size;
+ uint16_t hds_backfill_size;
+
+ uint32_t rbq_id;
+
+ uint32_t rsvd2[8];
+
+ struct phys_addr pages[2];
+#endif
+ } req;
+ struct {
+#ifdef _BIG_ENDIAN
+ uint8_t rsvd0;
+ uint8_t rss_cpuid;
+ uint16_t rq_id;
+
+ uint8_t db_format;
+ uint8_t db_reg_set;
+ uint16_t rsvd1;
+
+ uint32_t db_offset;
+
+ uint32_t rsvd2;
+
+ uint16_t rsvd3;
+ uint16_t rq_flags;
+
+#else
+ uint16_t rq_id;
+ uint8_t rss_cpuid;
+ uint8_t rsvd0;
+
+ uint16_t rsvd1;
+ uint8_t db_reg_set;
+ uint8_t db_format;
+
+ uint32_t db_offset;
+
+ uint32_t rsvd2;
+
+ uint16_t rq_flags;
+ uint16_t rsvd3;
+#endif
+ } rsp;
+
+ } params;
+};
+
+struct mbx_delete_nic_rq_v1 {
+ struct mbx_hdr hdr;
+ union {
+ struct {
+#ifdef _BIG_ENDIAN
+ uint16_t bypass_flush;
+ uint16_t rq_id;
+ uint16_t rsvd;
+ uint16_t rq_flags;
+#else
+ uint16_t rq_id;
+ uint16_t bypass_flush;
+ uint16_t rq_flags;
+ uint16_t rsvd;
+#endif
+ } req;
+ struct {
+ uint32_t rsvd[2];
+ } rsp;
+ } params;
+};
+
+struct nic_hwlro_singleton_cqe {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t ip_opt:1;
+ uint32_t vtp:1;
+ uint32_t pkt_size:14;
+ uint32_t vlan_tag:16;
+
+ /* dw 1 */
+ uint32_t num_frags:3;
+ uint32_t rsvd1:3;
+ uint32_t frag_index:10;
+ uint32_t rsvd:8;
+ uint32_t ipv6_frame:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t udpframe:1;
+ uint32_t tcpframe:1;
+ uint32_t ipframe:1;
+ uint32_t rss_hp:1;
+ uint32_t error:1;
+
+ /* dw 2 */
+ uint32_t valid:1;
+ uint32_t cqe_type:2;
+ uint32_t debug:7;
+ uint32_t rsvd4:6;
+ uint32_t data_offset:8;
+ uint32_t rsvd3:3;
+ uint32_t rss_bank:1;
+ uint32_t qnq:1;
+ uint32_t rsvd2:3;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#else
+ /* dw 0 */
+ uint32_t vlan_tag:16;
+ uint32_t pkt_size:14;
+ uint32_t vtp:1;
+ uint32_t ip_opt:1;
+
+ /* dw 1 */
+ uint32_t error:1;
+ uint32_t rss_hp:1;
+ uint32_t ipframe:1;
+ uint32_t tcpframe:1;
+ uint32_t udpframe:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ipv6_frame:1;
+ uint32_t rsvd:8;
+ uint32_t frag_index:10;
+ uint32_t rsvd1:3;
+ uint32_t num_frags:3;
+
+ /* dw 2 */
+ uint32_t rsvd2:3;
+ uint32_t qnq:1;
+ uint32_t rss_bank:1;
+ uint32_t rsvd3:3;
+ uint32_t data_offset:8;
+ uint32_t rsvd4:6;
+ uint32_t debug:7;
+ uint32_t cqe_type:2;
+ uint32_t valid:1;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#endif
+};
+
+struct nic_hwlro_cqe_part1 {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t tcp_timestamp_val;
+
+ /* dw 1 */
+ uint32_t tcp_timestamp_ecr;
+
+ /* dw 2 */
+ uint32_t valid:1;
+ uint32_t cqe_type:2;
+ uint32_t rsvd3:7;
+ uint32_t rss_policy:4;
+ uint32_t rsvd2:2;
+ uint32_t data_offset:8;
+ uint32_t rsvd1:1;
+ uint32_t lro_desc:1;
+ uint32_t lro_timer_pop:1;
+ uint32_t rss_bank:1;
+ uint32_t qnq:1;
+ uint32_t rsvd:2;
+ uint32_t rss_flush:1;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#else
+ /* dw 0 */
+ uint32_t tcp_timestamp_val;
+
+ /* dw 1 */
+ uint32_t tcp_timestamp_ecr;
+
+ /* dw 2 */
+ uint32_t rss_flush:1;
+ uint32_t rsvd:2;
+ uint32_t qnq:1;
+ uint32_t rss_bank:1;
+ uint32_t lro_timer_pop:1;
+ uint32_t lro_desc:1;
+ uint32_t rsvd1:1;
+ uint32_t data_offset:8;
+ uint32_t rsvd2:2;
+ uint32_t rss_policy:4;
+ uint32_t rsvd3:7;
+ uint32_t cqe_type:2;
+ uint32_t valid:1;
+
+ /* dw 3 */
+ uint32_t rss_hash_value;
+#endif
+};
+
+struct nic_hwlro_cqe_part2 {
+#ifdef _BIG_ENDIAN
+ /* dw 0 */
+ uint32_t ip_opt:1;
+ uint32_t vtp:1;
+ uint32_t pkt_size:14;
+ uint32_t vlan_tag:16;
+
+ /* dw 1 */
+ uint32_t tcp_window:16;
+ uint32_t coalesced_size:16;
+
+ /* dw 2 */
+ uint32_t valid:1;
+ uint32_t cqe_type:2;
+ uint32_t rsvd:2;
+ uint32_t push:1;
+ uint32_t ts_opt:1;
+ uint32_t threshold:1;
+ uint32_t seg_cnt:8;
+ uint32_t frame_lifespan:8;
+ uint32_t ipv6_frame:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t udpframe:1;
+ uint32_t tcpframe:1;
+ uint32_t ipframe:1;
+ uint32_t rss_hp:1;
+ uint32_t error:1;
+
+ /* dw 3 */
+ uint32_t tcp_ack_num;
+#else
+ /* dw 0 */
+ uint32_t vlan_tag:16;
+ uint32_t pkt_size:14;
+ uint32_t vtp:1;
+ uint32_t ip_opt:1;
+
+ /* dw 1 */
+ uint32_t coalesced_size:16;
+ uint32_t tcp_window:16;
+
+ /* dw 2 */
+ uint32_t error:1;
+ uint32_t rss_hp:1;
+ uint32_t ipframe:1;
+ uint32_t tcpframe:1;
+ uint32_t udpframe:1;
+ uint32_t ip_cksum_pass:1;
+ uint32_t l4_cksum_pass:1;
+ uint32_t ipv6_frame:1;
+ uint32_t frame_lifespan:8;
+ uint32_t seg_cnt:8;
+ uint32_t threshold:1;
+ uint32_t ts_opt:1;
+ uint32_t push:1;
+ uint32_t rsvd:2;
+ uint32_t cqe_type:2;
+ uint32_t valid:1;
+
+ /* dw 3 */
+ uint32_t tcp_ack_num;
+#endif
+};
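These three CQE layouts are consumed as a small state machine in oce_rq_handler_lro() further down: type 0 is a self-contained completion, type 1 carries the TCP timestamps and is parked in rq->cqe_firstpart, and type 2 supplies the coalesced sizes and is paired with the stored first part. Condensed sketch (not a literal copy):

	/* condensed from oce_rq_handler_lro() in oce_if.c below */
	switch (cqe->cqe_type) {
	case 0x0:	/* singleton: one CQE describes the whole frame */
		oce_rx_lro(rq, cqe, NULL);
		break;
	case 0x1:	/* first half: stash until the matching part 2 */
		rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
		break;
	case 0x2:	/* second half: pair with the stored part 1 */
		cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
		oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)
		    rq->cqe_firstpart, cqe2);
		rq->cqe_firstpart = NULL;
		break;
	}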
diff --git a/sys/dev/oce/oce_if.c b/sys/dev/oce/oce_if.c
index 370461291234..d09977eb6a89 100644
--- a/sys/dev/oce/oce_if.c
+++ b/sys/dev/oce/oce_if.c
@@ -42,77 +42,92 @@
#include "opt_inet.h"
#include "oce_if.h"
+#include "oce_user.h"
+
+#define is_tso_pkt(m) (m->m_pkthdr.csum_flags & CSUM_TSO)
/* UE Status Low CSR */
static char *ue_status_low_desc[] = {
- "CEV",
- "CTX",
- "DBUF",
- "ERX",
- "Host",
- "MPU",
- "NDMA",
- "PTC ",
- "RDMA ",
- "RXF ",
- "RXIPS ",
- "RXULP0 ",
- "RXULP1 ",
- "RXULP2 ",
- "TIM ",
- "TPOST ",
- "TPRE ",
- "TXIPS ",
- "TXULP0 ",
- "TXULP1 ",
- "UC ",
- "WDMA ",
- "TXULP2 ",
- "HOST1 ",
- "P0_OB_LINK ",
- "P1_OB_LINK ",
- "HOST_GPIO ",
- "MBOX ",
- "AXGMAC0",
- "AXGMAC1",
- "JTAG",
- "MPU_INTPEND"
+ "CEV",
+ "CTX",
+ "DBUF",
+ "ERX",
+ "Host",
+ "MPU",
+ "NDMA",
+ "PTC ",
+ "RDMA ",
+ "RXF ",
+ "RXIPS ",
+ "RXULP0 ",
+ "RXULP1 ",
+ "RXULP2 ",
+ "TIM ",
+ "TPOST ",
+ "TPRE ",
+ "TXIPS ",
+ "TXULP0 ",
+ "TXULP1 ",
+ "UC ",
+ "WDMA ",
+ "TXULP2 ",
+ "HOST1 ",
+ "P0_OB_LINK ",
+ "P1_OB_LINK ",
+ "HOST_GPIO ",
+ "MBOX ",
+ "AXGMAC0",
+ "AXGMAC1",
+ "JTAG",
+ "MPU_INTPEND"
};
/* UE Status High CSR */
static char *ue_status_hi_desc[] = {
- "LPCMEMHOST",
- "MGMT_MAC",
- "PCS0ONLINE",
- "MPU_IRAM",
- "PCS1ONLINE",
- "PCTL0",
- "PCTL1",
- "PMEM",
- "RR",
- "TXPB",
- "RXPP",
- "XAUI",
- "TXP",
- "ARM",
- "IPC",
- "HOST2",
- "HOST3",
- "HOST4",
- "HOST5",
- "HOST6",
- "HOST7",
- "HOST8",
- "HOST9",
- "NETC",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown",
- "Unknown"
+ "LPCMEMHOST",
+ "MGMT_MAC",
+ "PCS0ONLINE",
+ "MPU_IRAM",
+ "PCS1ONLINE",
+ "PCTL0",
+ "PCTL1",
+ "PMEM",
+ "RR",
+ "TXPB",
+ "RXPP",
+ "XAUI",
+ "TXP",
+ "ARM",
+ "IPC",
+ "HOST2",
+ "HOST3",
+ "HOST4",
+ "HOST5",
+ "HOST6",
+ "HOST7",
+ "HOST8",
+ "HOST9",
+ "NETC",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown",
+ "Unknown"
+};
+
+struct oce_common_cqe_info{
+ uint8_t vtp:1;
+ uint8_t l4_cksum_pass:1;
+ uint8_t ip_cksum_pass:1;
+ uint8_t ipv6_frame:1;
+ uint8_t qnq:1;
+ uint8_t rsvd:3;
+ uint8_t num_frags;
+ uint16_t pkt_size;
+ uint16_t vtag;
};
@@ -140,17 +155,19 @@ static int oce_media_change(struct ifnet *ifp);
/* Transmit routines prototypes */
static int oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index);
static void oce_tx_restart(POCE_SOFTC sc, struct oce_wq *wq);
-static void oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx,
- uint32_t status);
+static void oce_process_tx_completion(struct oce_wq *wq);
static int oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m,
struct oce_wq *wq);
/* Receive routines prototypes */
-static void oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
static int oce_cqe_vtp_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
static int oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe);
-static void oce_rx(struct oce_rq *rq, uint32_t rqe_idx,
- struct oce_nic_rx_cqe *cqe);
+static void oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe);
+static void oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq);
+static uint16_t oce_rq_handler_lro(void *arg);
+static void oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2);
+static void oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2);
+static void oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m);
/* Helper function prototypes in this file */
static int oce_attach_ifp(POCE_SOFTC sc);
@@ -169,11 +186,12 @@ static void process_link_state(POCE_SOFTC sc,
static int oce_tx_asic_stall_verify(POCE_SOFTC sc, struct mbuf *m);
static void oce_get_config(POCE_SOFTC sc);
static struct mbuf *oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete);
+static void oce_read_env_variables(POCE_SOFTC sc);
+
/* IP specific */
#if defined(INET6) || defined(INET)
static int oce_init_lro(POCE_SOFTC sc);
-static void oce_rx_flush_lro(struct oce_rq *rq);
static struct mbuf * oce_tso_setup(POCE_SOFTC sc, struct mbuf **mpp);
#endif
@@ -206,7 +224,7 @@ const char component_revision[32] = {"///" COMPONENT_REVISION "///"};
/* Module capabilities and parameters */
uint32_t oce_max_rsp_handled = OCE_MAX_RSP_HANDLED;
uint32_t oce_enable_rss = OCE_MODCAP_RSS;
-
+uint32_t oce_rq_buf_size = 2048;
TUNABLE_INT("hw.oce.max_rsp_handled", &oce_max_rsp_handled);
TUNABLE_INT("hw.oce.enable_rss", &oce_enable_rss);
@@ -222,8 +240,10 @@ static uint32_t supportedDevices[] = {
(PCI_VENDOR_EMULEX << 16) | PCI_PRODUCT_SH
};
+POCE_SOFTC softc_head = NULL;
+POCE_SOFTC softc_tail = NULL;
-
+struct oce_rdma_if *oce_rdma_if = NULL;
/*****************************************************************************
* Driver entry points functions *
@@ -292,7 +312,8 @@ oce_attach(device_t dev)
sc->tx_ring_size = OCE_TX_RING_SIZE;
sc->rx_ring_size = OCE_RX_RING_SIZE;
- sc->rq_frag_size = OCE_RQ_BUF_SIZE;
+ /* receive fragment size should be a multiple of 2K */
+ sc->rq_frag_size = ((oce_rq_buf_size / 2048) * 2048);
sc->flow_control = OCE_DEFAULT_FLOW_CONTROL;
sc->promisc = OCE_DEFAULT_PROMISCUOUS;
@@ -304,6 +325,8 @@ oce_attach(device_t dev)
if (rc)
goto pci_res_free;
+ oce_read_env_variables(sc);
+
oce_get_config(sc);
setup_max_queues_want(sc);
@@ -341,11 +364,19 @@ oce_attach(device_t dev)
oce_add_sysctls(sc);
- callout_init(&sc->timer, 1);
+ callout_init(&sc->timer, CALLOUT_MPSAFE);
rc = callout_reset(&sc->timer, 2 * hz, oce_local_timer, sc);
if (rc)
goto stats_free;
+ sc->next = NULL;
+ if (softc_tail != NULL) {
+ softc_tail->next = sc;
+ } else {
+ softc_head = sc;
+ }
+ softc_tail = sc;
+
return 0;
stats_free:
@@ -383,6 +414,22 @@ static int
oce_detach(device_t dev)
{
POCE_SOFTC sc = device_get_softc(dev);
+ POCE_SOFTC poce_sc_tmp, *ppoce_sc_tmp1, poce_sc_tmp2 = NULL;
+
+ poce_sc_tmp = softc_head;
+ ppoce_sc_tmp1 = &softc_head;
+ while (poce_sc_tmp != NULL) {
+ if (poce_sc_tmp == sc) {
+ *ppoce_sc_tmp1 = sc->next;
+ if (sc->next == NULL) {
+ softc_tail = poce_sc_tmp2;
+ }
+ break;
+ }
+ poce_sc_tmp2 = poce_sc_tmp;
+ ppoce_sc_tmp1 = &poce_sc_tmp->next;
+ poce_sc_tmp = poce_sc_tmp->next;
+ }
LOCK(&sc->dev_lock);
oce_if_deactivate(sc);
@@ -520,8 +567,16 @@ oce_ioctl(struct ifnet *ifp, u_long command, caddr_t data)
oce_vid_config(sc);
}
#if defined(INET6) || defined(INET)
- if (u & IFCAP_LRO)
+ if (u & IFCAP_LRO) {
ifp->if_capenable ^= IFCAP_LRO;
+ if(sc->enable_hwlro) {
+ if(ifp->if_capenable & IFCAP_LRO) {
+ rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
+ }else {
+ rc = oce_mbox_nic_set_iface_lro_config(sc, 0);
+ }
+ }
+ }
#endif
break;
@@ -563,6 +618,9 @@ oce_multiq_start(struct ifnet *ifp, struct mbuf *m)
int queue_index = 0;
int status = 0;
+ if (!sc->link_status)
+ return ENXIO;
+
if (M_HASHTYPE_GET(m) != M_HASHTYPE_NONE)
queue_index = m->m_pkthdr.flowid % sc->nwqs;
@@ -653,20 +711,41 @@ oce_setup_intr(POCE_SOFTC sc)
{
int rc = 0, use_intx = 0;
int vector = 0, req_vectors = 0;
+ int tot_req_vectors, tot_vectors;
if (is_rss_enabled(sc))
req_vectors = MAX((sc->nrqs - 1), sc->nwqs);
else
req_vectors = 1;
- if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
+ tot_req_vectors = req_vectors;
+ if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
+ if (req_vectors > 1) {
+ tot_req_vectors += OCE_RDMA_VECTORS;
+ sc->roce_intr_count = OCE_RDMA_VECTORS;
+ }
+ }
+
+ if (sc->flags & OCE_FLAGS_MSIX_CAPABLE) {
sc->intr_count = req_vectors;
- rc = pci_alloc_msix(sc->dev, &sc->intr_count);
+ tot_vectors = tot_req_vectors;
+ rc = pci_alloc_msix(sc->dev, &tot_vectors);
if (rc != 0) {
use_intx = 1;
pci_release_msi(sc->dev);
- } else
- sc->flags |= OCE_FLAGS_USING_MSIX;
+ } else {
+ if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
+ if (tot_vectors < tot_req_vectors) {
+ if (sc->intr_count < (2 * OCE_RDMA_VECTORS)) {
+ sc->roce_intr_count = (tot_vectors / 2);
+ }
+ sc->intr_count = tot_vectors - sc->roce_intr_count;
+ }
+ } else {
+ sc->intr_count = tot_vectors;
+ }
+ sc->flags |= OCE_FLAGS_USING_MSIX;
+ }
} else
use_intx = 1;
@@ -854,6 +933,79 @@ oce_media_change(struct ifnet *ifp)
}
+static void oce_is_pkt_dest_bmc(POCE_SOFTC sc,
+ struct mbuf *m, boolean_t *os2bmc,
+ struct mbuf **m_new)
+{
+ struct ether_header *eh = NULL;
+
+ eh = mtod(m, struct ether_header *);
+
+ if (!is_os2bmc_enabled(sc) || *os2bmc) {
+ *os2bmc = FALSE;
+ goto done;
+ }
+ if (!ETHER_IS_MULTICAST(eh->ether_dhost))
+ goto done;
+
+ if (is_mc_allowed_on_bmc(sc, eh) ||
+ is_bc_allowed_on_bmc(sc, eh) ||
+ is_arp_allowed_on_bmc(sc, ntohs(eh->ether_type))) {
+ *os2bmc = TRUE;
+ goto done;
+ }
+
+ if (mtod(m, struct ip *)->ip_p == IPPROTO_IPV6) {
+ struct ip6_hdr *ip6 = mtod(m, struct ip6_hdr *);
+ uint8_t nexthdr = ip6->ip6_nxt;
+ if (nexthdr == IPPROTO_ICMPV6) {
+ struct icmp6_hdr *icmp6 = (struct icmp6_hdr *)(ip6 + 1);
+ switch (icmp6->icmp6_type) {
+ case ND_ROUTER_ADVERT:
+ *os2bmc = is_ipv6_ra_filt_enabled(sc);
+ goto done;
+ case ND_NEIGHBOR_ADVERT:
+ *os2bmc = is_ipv6_na_filt_enabled(sc);
+ goto done;
+ default:
+ break;
+ }
+ }
+ }
+
+ if (mtod(m, struct ip *)->ip_p == IPPROTO_UDP) {
+ struct ip *ip = mtod(m, struct ip *);
+ int iphlen = ip->ip_hl << 2;
+ struct udphdr *uh = (struct udphdr *)((caddr_t)ip + iphlen);
+ switch (uh->uh_dport) {
+ case DHCP_CLIENT_PORT:
+ *os2bmc = is_dhcp_client_filt_enabled(sc);
+ goto done;
+ case DHCP_SERVER_PORT:
+ *os2bmc = is_dhcp_srvr_filt_enabled(sc);
+ goto done;
+ case NET_BIOS_PORT1:
+ case NET_BIOS_PORT2:
+ *os2bmc = is_nbios_filt_enabled(sc);
+ goto done;
+ case DHCPV6_RAS_PORT:
+ *os2bmc = is_ipv6_ras_filt_enabled(sc);
+ goto done;
+ default:
+ break;
+ }
+ }
+done:
+ if (*os2bmc) {
+ *m_new = m_dup(m, M_NOWAIT);
+ if (!*m_new) {
+ *os2bmc = FALSE;
+ return;
+ }
+ *m_new = oce_insert_vlan_tag(sc, *m_new, NULL);
+ }
+}
+
/*****************************************************************************
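The contract of the helper above, as oce_tx() below uses it: when it sets *os2bmc, m_new holds a vlan-tagged duplicate of the frame, and oce_tx() loops back through tx_start to transmit that copy with the mgmt bit set so the ASIC forwards it to the BMC. Condensed flow:

	/* condensed from oce_tx() below */
	oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
	if (os2bmc) {
		m = m_new;	/* duplicate goes out with nichdr mgmt = 1 */
		goto tx_start;
	}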
@@ -865,14 +1017,16 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
{
int rc = 0, i, retry_cnt = 0;
bus_dma_segment_t segs[OCE_MAX_TX_ELEMENTS];
- struct mbuf *m, *m_temp;
+ struct mbuf *m, *m_temp, *m_new = NULL;
struct oce_wq *wq = sc->wq[wq_index];
struct oce_packet_desc *pd;
struct oce_nic_hdr_wqe *nichdr;
struct oce_nic_frag_wqe *nicfrag;
+ struct ether_header *eh = NULL;
int num_wqes;
uint32_t reg_value;
boolean_t complete = TRUE;
+ boolean_t os2bmc = FALSE;
m = *mpp;
if (!m)
@@ -883,6 +1037,13 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
goto free_ret;
}
+ /* Don't allow non-TSO packets longer than MTU */
+ if (!is_tso_pkt(m)) {
+ eh = mtod(m, struct ether_header *);
+ if(m->m_pkthdr.len > ETHER_MAX_FRAME(sc->ifp, eh->ether_type, FALSE))
+ goto free_ret;
+ }
+
if(oce_tx_asic_stall_verify(sc, m)) {
m = oce_insert_vlan_tag(sc, m, &complete);
if(!m) {
@@ -892,6 +1053,19 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
}
+ /* Lancer, SH ASIC has a bug wherein Packets that are 32 bytes or less
+ * may cause a transmit stall on that port. So the work-around is to
+ * pad short packets (<= 32 bytes) to a 36-byte length.
+ */
+ if(IS_SH(sc) || IS_XE201(sc) ) {
+ if(m->m_pkthdr.len <= 32) {
+ char buf[36];
+ bzero((void *)buf, 36);
+ m_append(m, (36 - m->m_pkthdr.len), buf);
+ }
+ }
+
+tx_start:
if (m->m_pkthdr.csum_flags & CSUM_TSO) {
/* consolidate packet buffers for TSO/LSO segment offload */
#if defined(INET6) || defined(INET)
@@ -905,7 +1079,9 @@ oce_tx(POCE_SOFTC sc, struct mbuf **mpp, int wq_index)
}
}
+
pd = &wq->pckts[wq->pkt_desc_head];
+
retry:
rc = bus_dmamap_load_mbuf_sg(wq->tag,
pd->map,
@@ -935,6 +1111,7 @@ retry:
nichdr->u0.dw[3] = 0;
nichdr->u0.s.complete = complete;
+ nichdr->u0.s.mgmt = os2bmc;
nichdr->u0.s.event = 1;
nichdr->u0.s.crc = 1;
nichdr->u0.s.forward = 0;
@@ -998,6 +1175,12 @@ retry:
bus_dmamap_sync(wq->ring->dma.tag, wq->ring->dma.map,
BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
reg_value = (num_wqes << 16) | wq->wq_id;
+
+ /* if os2bmc is not enabled or if the pkt is already tagged as
+ bmc, do nothing
+ */
+ oce_is_pkt_dest_bmc(sc, m, &os2bmc, &m_new);
+
OCE_WRITE_REG32(sc, db, wq->db_offset, reg_value);
} else if (rc == EFBIG) {
@@ -1015,6 +1198,11 @@ retry:
return rc;
else
goto free_ret;
+
+ if (os2bmc) {
+ m = m_new;
+ goto tx_start;
+ }
return 0;
@@ -1026,7 +1214,7 @@ free_ret:
static void
-oce_tx_complete(struct oce_wq *wq, uint32_t wqe_idx, uint32_t status)
+oce_process_tx_completion(struct oce_wq *wq)
{
struct oce_packet_desc *pd;
POCE_SOFTC sc = (POCE_SOFTC) wq->parent;
@@ -1213,6 +1401,7 @@ oce_wq_handler(void *arg)
struct oce_nic_tx_cqe *cqe;
int num_cqes = 0;
+ LOCK(&wq->tx_compl_lock);
bus_dmamap_sync(cq->ring->dma.tag,
cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_tx_cqe);
@@ -1223,7 +1412,7 @@ oce_wq_handler(void *arg)
if (wq->ring->cidx >= wq->ring->num_items)
wq->ring->cidx -= wq->ring->num_items;
- oce_tx_complete(wq, cqe->u0.s.wqe_index, cqe->u0.s.status);
+ oce_process_tx_completion(wq);
wq->tx_stats.tx_compl++;
cqe->u0.dw[3] = 0;
RING_GET(cq->ring, 1);
@@ -1236,8 +1425,9 @@ oce_wq_handler(void *arg)
if (num_cqes)
oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
-
- return 0;
+
+ UNLOCK(&wq->tx_compl_lock);
+ return num_cqes;
}
@@ -1292,19 +1482,216 @@ oce_multiq_transmit(struct ifnet *ifp, struct mbuf *m, struct oce_wq *wq)
*****************************************************************************/
static void
-oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
+oce_correct_header(struct mbuf *m, struct nic_hwlro_cqe_part1 *cqe1, struct nic_hwlro_cqe_part2 *cqe2)
+{
+ uint32_t *p;
+ struct ether_header *eh = NULL;
+ struct tcphdr *tcp_hdr = NULL;
+ struct ip *ip4_hdr = NULL;
+ struct ip6_hdr *ip6 = NULL;
+ uint32_t payload_len = 0;
+
+ eh = mtod(m, struct ether_header *);
+ /* correct IP header */
+ if(!cqe2->ipv6_frame) {
+ ip4_hdr = (struct ip *)((char*)eh + sizeof(struct ether_header));
+ ip4_hdr->ip_ttl = cqe2->frame_lifespan;
+ ip4_hdr->ip_len = htons(cqe2->coalesced_size - sizeof(struct ether_header));
+ tcp_hdr = (struct tcphdr *)((char*)ip4_hdr + sizeof(struct ip));
+ }else {
+ ip6 = (struct ip6_hdr *)((char*)eh + sizeof(struct ether_header));
+ ip6->ip6_ctlun.ip6_un1.ip6_un1_hlim = cqe2->frame_lifespan;
+ payload_len = cqe2->coalesced_size - sizeof(struct ether_header)
+ - sizeof(struct ip6_hdr);
+ ip6->ip6_ctlun.ip6_un1.ip6_un1_plen = htons(payload_len);
+ tcp_hdr = (struct tcphdr *)((char*)ip6 + sizeof(struct ip6_hdr));
+ }
+
+ /* correct tcp header */
+ tcp_hdr->th_ack = htonl(cqe2->tcp_ack_num);
+ if(cqe2->push) {
+ tcp_hdr->th_flags |= TH_PUSH;
+ }
+ tcp_hdr->th_win = htons(cqe2->tcp_window);
+ tcp_hdr->th_sum = 0xffff;
+ if(cqe2->ts_opt) {
+ p = (uint32_t *)((char*)tcp_hdr + sizeof(struct tcphdr) + 2);
+ *p = cqe1->tcp_timestamp_val;
+ *(p+1) = cqe1->tcp_timestamp_ecr;
+ }
+
+ return;
+}
+
+static void
+oce_rx_mbuf_chain(struct oce_rq *rq, struct oce_common_cqe_info *cqe_info, struct mbuf **m)
+{
+ POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+ uint32_t i = 0, frag_len = 0;
+ uint32_t len = cqe_info->pkt_size;
+ struct oce_packet_desc *pd;
+ struct mbuf *tail = NULL;
+
+ for (i = 0; i < cqe_info->num_frags; i++) {
+ if (rq->ring->cidx == rq->ring->pidx) {
+ device_printf(sc->dev,
+ "oce_rx_mbuf_chain: Invalid RX completion - Queue is empty\n");
+ return;
+ }
+ pd = &rq->pckts[rq->ring->cidx];
+
+ bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(rq->tag, pd->map);
+ RING_GET(rq->ring, 1);
+ rq->pending--;
+
+ frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
+ pd->mbuf->m_len = frag_len;
+
+ if (tail != NULL) {
+ /* additional fragments */
+ pd->mbuf->m_flags &= ~M_PKTHDR;
+ tail->m_next = pd->mbuf;
+ if(rq->islro)
+ tail->m_nextpkt = NULL;
+ tail = pd->mbuf;
+ } else {
+ /* first fragment, fill out much of the packet header */
+ pd->mbuf->m_pkthdr.len = len;
+ if(rq->islro)
+ pd->mbuf->m_nextpkt = NULL;
+ pd->mbuf->m_pkthdr.csum_flags = 0;
+ if (IF_CSUM_ENABLED(sc)) {
+ if (cqe_info->l4_cksum_pass) {
+ if(!cqe_info->ipv6_frame) { /* IPV4 */
+ pd->mbuf->m_pkthdr.csum_flags |=
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ }else { /* IPV6 frame */
+ if(rq->islro) {
+ pd->mbuf->m_pkthdr.csum_flags |=
+ (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
+ }
+ }
+ pd->mbuf->m_pkthdr.csum_data = 0xffff;
+ }
+ if (cqe_info->ip_cksum_pass) {
+ pd->mbuf->m_pkthdr.csum_flags |=
+ (CSUM_IP_CHECKED|CSUM_IP_VALID);
+ }
+ }
+ *m = tail = pd->mbuf;
+ }
+ pd->mbuf = NULL;
+ len -= frag_len;
+ }
+
+ return;
+}
+
+static void
+oce_rx_lro(struct oce_rq *rq, struct nic_hwlro_singleton_cqe *cqe, struct nic_hwlro_cqe_part2 *cqe2)
+{
+ POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
+ struct nic_hwlro_cqe_part1 *cqe1 = NULL;
+ struct mbuf *m = NULL;
+ struct oce_common_cqe_info cq_info;
+
+ /* parse cqe */
+ if(cqe2 == NULL) {
+ cq_info.pkt_size = cqe->pkt_size;
+ cq_info.vtag = cqe->vlan_tag;
+ cq_info.l4_cksum_pass = cqe->l4_cksum_pass;
+ cq_info.ip_cksum_pass = cqe->ip_cksum_pass;
+ cq_info.ipv6_frame = cqe->ipv6_frame;
+ cq_info.vtp = cqe->vtp;
+ cq_info.qnq = cqe->qnq;
+ }else {
+ cqe1 = (struct nic_hwlro_cqe_part1 *)cqe;
+ cq_info.pkt_size = cqe2->coalesced_size;
+ cq_info.vtag = cqe2->vlan_tag;
+ cq_info.l4_cksum_pass = cqe2->l4_cksum_pass;
+ cq_info.ip_cksum_pass = cqe2->ip_cksum_pass;
+ cq_info.ipv6_frame = cqe2->ipv6_frame;
+ cq_info.vtp = cqe2->vtp;
+ cq_info.qnq = cqe1->qnq;
+ }
+
+ cq_info.vtag = BSWAP_16(cq_info.vtag);
+
+ cq_info.num_frags = cq_info.pkt_size / rq->cfg.frag_size;
+ if(cq_info.pkt_size % rq->cfg.frag_size)
+ cq_info.num_frags++;
+
+ oce_rx_mbuf_chain(rq, &cq_info, &m);
+
+ if (m) {
+ if(cqe2) {
+ //assert(cqe2->valid != 0);
+
+ //assert(cqe2->cqe_type != 2);
+ oce_correct_header(m, cqe1, cqe2);
+ }
+
+ m->m_pkthdr.rcvif = sc->ifp;
+#if __FreeBSD_version >= 800000
+ if (rq->queue_index)
+ m->m_pkthdr.flowid = (rq->queue_index - 1);
+ else
+ m->m_pkthdr.flowid = rq->queue_index;
+ M_HASHTYPE_SET(m, M_HASHTYPE_OPAQUE);
+#endif
+ /* This determines if the vlan tag is valid */
+ if (cq_info.vtp) {
+ if (sc->function_mode & FNM_FLEX10_MODE) {
+ /* FLEX10. If QnQ is not set, neglect VLAN */
+ if (cq_info.qnq) {
+ m->m_pkthdr.ether_vtag = cq_info.vtag;
+ m->m_flags |= M_VLANTAG;
+ }
+ } else if (sc->pvid != (cq_info.vtag & VLAN_VID_MASK)) {
+ /* In UMC mode pvid is generally stripped by
+ hw. But in some cases we have seen it come
+ with pvid. So if pvid == vlan, neglect vlan.
+ */
+ m->m_pkthdr.ether_vtag = cq_info.vtag;
+ m->m_flags |= M_VLANTAG;
+ }
+ }
+ if_inc_counter(sc->ifp, IFCOUNTER_IPACKETS, 1);
+
+ (*sc->ifp->if_input) (sc->ifp, m);
+
+ /* Update rx stats per queue */
+ rq->rx_stats.rx_pkts++;
+ rq->rx_stats.rx_bytes += cq_info.pkt_size;
+ rq->rx_stats.rx_frags += cq_info.num_frags;
+ rq->rx_stats.rx_ucast_pkts++;
+ }
+ return;
+}
+
+static void
+oce_rx(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
{
- uint32_t out;
- struct oce_packet_desc *pd;
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
- int i, len, frag_len;
- struct mbuf *m = NULL, *tail = NULL;
- uint16_t vtag;
+ int len;
+ struct mbuf *m = NULL;
+ struct oce_common_cqe_info cq_info;
+ uint16_t vtag = 0;
+
+ /* Is it a flush compl that has no data */
+ if(!cqe->u0.s.num_fragments)
+ goto exit;
len = cqe->u0.s.pkt_size;
if (!len) {
/*partial DMA workaround for Lancer*/
- oce_discard_rx_comp(rq, cqe);
+ oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
+ goto exit;
+ }
+
+ if (!oce_cqe_portid_valid(sc, cqe)) {
+ oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
goto exit;
}
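One detail in oce_rx_lro() above: the buffer count is derived from the (possibly coalesced) frame length with a divide-then-round-up pair, which is the usual ceiling division:

	/* equivalent to the num_frags computation in oce_rx_lro() above */
	cq_info.num_frags = (cq_info.pkt_size + rq->cfg.frag_size - 1) /
	    rq->cfg.frag_size;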
@@ -1313,61 +1700,16 @@ oce_rx(struct oce_rq *rq, uint32_t rqe_idx, struct oce_nic_rx_cqe *cqe)
vtag = BSWAP_16(cqe->u0.s.vlan_tag);
else
vtag = cqe->u0.s.vlan_tag;
+
+ cq_info.l4_cksum_pass = cqe->u0.s.l4_cksum_pass;
+ cq_info.ip_cksum_pass = cqe->u0.s.ip_cksum_pass;
+ cq_info.ipv6_frame = cqe->u0.s.ip_ver;
+ cq_info.num_frags = cqe->u0.s.num_fragments;
+ cq_info.pkt_size = cqe->u0.s.pkt_size;
-
- for (i = 0; i < cqe->u0.s.num_fragments; i++) {
-
- if (rq->packets_out == rq->packets_in) {
- device_printf(sc->dev,
- "RQ transmit descriptor missing\n");
- }
- out = rq->packets_out + 1;
- if (out == OCE_RQ_PACKET_ARRAY_SIZE)
- out = 0;
- pd = &rq->pckts[rq->packets_out];
- rq->packets_out = out;
-
- bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(rq->tag, pd->map);
- rq->pending--;
-
- frag_len = (len > rq->cfg.frag_size) ? rq->cfg.frag_size : len;
- pd->mbuf->m_len = frag_len;
-
- if (tail != NULL) {
- /* additional fragments */
- pd->mbuf->m_flags &= ~M_PKTHDR;
- tail->m_next = pd->mbuf;
- tail = pd->mbuf;
- } else {
- /* first fragment, fill out much of the packet header */
- pd->mbuf->m_pkthdr.len = len;
- pd->mbuf->m_pkthdr.csum_flags = 0;
- if (IF_CSUM_ENABLED(sc)) {
- if (cqe->u0.s.l4_cksum_pass) {
- pd->mbuf->m_pkthdr.csum_flags |=
- (CSUM_DATA_VALID | CSUM_PSEUDO_HDR);
- pd->mbuf->m_pkthdr.csum_data = 0xffff;
- }
- if (cqe->u0.s.ip_cksum_pass) {
- if (!cqe->u0.s.ip_ver) { /* IPV4 */
- pd->mbuf->m_pkthdr.csum_flags |=
- (CSUM_IP_CHECKED|CSUM_IP_VALID);
- }
- }
- }
- m = tail = pd->mbuf;
- }
- pd->mbuf = NULL;
- len -= frag_len;
- }
+ oce_rx_mbuf_chain(rq, &cq_info, &m);
if (m) {
- if (!oce_cqe_portid_valid(sc, cqe)) {
- m_freem(m);
- goto exit;
- }
-
m->m_pkthdr.rcvif = sc->ifp;
#if __FreeBSD_version >= 800000
if (rq->queue_index)
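The rewritten receive path drops the old packets_in/packets_out counters in favor of the ring's own indices: an RQ whose consumer index equals its producer index has nothing posted, and every consumed fragment advances the consumer. The shared pattern from oce_rx_mbuf_chain() and oce_discard_rx_comp():

	/* per-fragment bookkeeping shared by the new receive helpers */
	if (rq->ring->cidx == rq->ring->pidx)
		return;			/* queue empty: bogus completion */
	pd = &rq->pckts[rq->ring->cidx];
	RING_GET(rq->ring, 1);		/* advance the consumer index */
	rq->pending--;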
@@ -1429,31 +1771,30 @@ exit:
}
-static void
-oce_discard_rx_comp(struct oce_rq *rq, struct oce_nic_rx_cqe *cqe)
+void
+oce_discard_rx_comp(struct oce_rq *rq, int num_frags)
{
- uint32_t out, i = 0;
+ uint32_t i = 0;
struct oce_packet_desc *pd;
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
- int num_frags = cqe->u0.s.num_fragments;
for (i = 0; i < num_frags; i++) {
- if (rq->packets_out == rq->packets_in) {
- device_printf(sc->dev,
- "RQ transmit descriptor missing\n");
- }
- out = rq->packets_out + 1;
- if (out == OCE_RQ_PACKET_ARRAY_SIZE)
- out = 0;
- pd = &rq->pckts[rq->packets_out];
- rq->packets_out = out;
+ if (rq->ring->cidx == rq->ring->pidx) {
+ device_printf(sc->dev,
+ "oce_discard_rx_comp: Invalid RX completion - Queue is empty\n");
+ return;
+ }
+ pd = &rq->pckts[rq->ring->cidx];
+ bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
+ bus_dmamap_unload(rq->tag, pd->map);
+ if (pd->mbuf != NULL) {
+ m_freem(pd->mbuf);
+ pd->mbuf = NULL;
+ }
- bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
- bus_dmamap_unload(rq->tag, pd->map);
- rq->pending--;
- m_freem(pd->mbuf);
+ RING_GET(rq->ring, 1);
+ rq->pending--;
}
-
}
@@ -1493,7 +1834,7 @@ oce_cqe_portid_valid(POCE_SOFTC sc, struct oce_nic_rx_cqe *cqe)
}
#if defined(INET6) || defined(INET)
-static void
+void
oce_rx_flush_lro(struct oce_rq *rq)
{
struct lro_ctrl *lro = &rq->lro;
@@ -1553,27 +1894,30 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
int nsegs, added = 0;
struct oce_nic_rqe *rqe;
pd_rxulp_db_t rxdb_reg;
+ uint32_t val = 0;
+ uint32_t oce_max_rq_posts = 64;
bzero(&rxdb_reg, sizeof(pd_rxulp_db_t));
for (i = 0; i < count; i++) {
- in = rq->packets_in + 1;
- if (in == OCE_RQ_PACKET_ARRAY_SIZE)
- in = 0;
- if (in == rq->packets_out)
- break; /* no more room */
-
- pd = &rq->pckts[rq->packets_in];
- pd->mbuf = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
- if (pd->mbuf == NULL)
+ in = (rq->ring->pidx + 1) % OCE_RQ_PACKET_ARRAY_SIZE;
+
+ pd = &rq->pckts[rq->ring->pidx];
+ pd->mbuf = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, oce_rq_buf_size);
+ if (pd->mbuf == NULL) {
+ device_printf(sc->dev, "mbuf allocation failed, size = %d\n",oce_rq_buf_size);
break;
+ }
+ pd->mbuf->m_nextpkt = NULL;
+
+ pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = rq->cfg.frag_size;
- pd->mbuf->m_len = pd->mbuf->m_pkthdr.len = MCLBYTES;
rc = bus_dmamap_load_mbuf_sg(rq->tag,
pd->map,
pd->mbuf,
segs, &nsegs, BUS_DMA_NOWAIT);
if (rc) {
m_free(pd->mbuf);
+ device_printf(sc->dev, "bus_dmamap_load_mbuf_sg failed rc = %d\n", rc);
break;
}
@@ -1582,7 +1926,6 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
continue;
}
- rq->packets_in = in;
bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_PREREAD);
rqe = RING_GET_PRODUCER_ITEM_VA(rq->ring, struct oce_nic_rqe);
@@ -1593,23 +1936,124 @@ oce_alloc_rx_bufs(struct oce_rq *rq, int count)
added++;
rq->pending++;
}
+ oce_max_rq_posts = sc->enable_hwlro ? OCE_HWLRO_MAX_RQ_POSTS : OCE_MAX_RQ_POSTS;
if (added != 0) {
- for (i = added / OCE_MAX_RQ_POSTS; i > 0; i--) {
- rxdb_reg.bits.num_posted = OCE_MAX_RQ_POSTS;
+ for (i = added / oce_max_rq_posts; i > 0; i--) {
+ rxdb_reg.bits.num_posted = oce_max_rq_posts;
rxdb_reg.bits.qid = rq->rq_id;
- OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
- added -= OCE_MAX_RQ_POSTS;
+ if(rq->islro) {
+ val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
+ val |= oce_max_rq_posts << 16;
+ OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
+ }else {
+ OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+ }
+ added -= oce_max_rq_posts;
}
if (added > 0) {
rxdb_reg.bits.qid = rq->rq_id;
rxdb_reg.bits.num_posted = added;
- OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+ if(rq->islro) {
+ val |= rq->rq_id & DB_LRO_RQ_ID_MASK;
+ val |= added << 16;
+ OCE_WRITE_REG32(sc, db, DB_OFFSET, val);
+ }else {
+ OCE_WRITE_REG32(sc, db, PD_RXULP_DB, rxdb_reg.dw0);
+ }
}
}
return 0;
}
+static void
+oce_check_rx_bufs(POCE_SOFTC sc, uint32_t num_cqes, struct oce_rq *rq)
+{
+ if (num_cqes) {
+ oce_arm_cq(sc, rq->cq->cq_id, num_cqes, FALSE);
+ if(!sc->enable_hwlro) {
+ if((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) > 1)
+ oce_alloc_rx_bufs(rq, ((OCE_RQ_PACKET_ARRAY_SIZE - rq->pending) - 1));
+ }else {
+ if ((OCE_RQ_PACKET_ARRAY_SIZE -1 - rq->pending) > 64)
+ oce_alloc_rx_bufs(rq, 64);
+ }
+ }
+
+ return;
+}
+
+uint16_t
+oce_rq_handler_lro(void *arg)
+{
+ struct oce_rq *rq = (struct oce_rq *)arg;
+ struct oce_cq *cq = rq->cq;
+ POCE_SOFTC sc = rq->parent;
+ struct nic_hwlro_singleton_cqe *cqe;
+ struct nic_hwlro_cqe_part2 *cqe2;
+ int num_cqes = 0;
+
+ LOCK(&rq->rx_lock);
+ bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
+ while (cqe->valid) {
+ if(cqe->cqe_type == 0) { /* singleton cqe */
+ /* we should not get singleton cqe after cqe1 on same rq */
+ if(rq->cqe_firstpart != NULL) {
+ device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
+ goto exit_rq_handler_lro;
+ }
+ if(cqe->error != 0) {
+ rq->rx_stats.rxcp_err++;
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
+ }
+ oce_rx_lro(rq, cqe, NULL);
+ rq->rx_stats.rx_compl++;
+ cqe->valid = 0;
+ RING_GET(cq->ring, 1);
+ num_cqes++;
+ if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
+ break;
+ }else if(cqe->cqe_type == 0x1) { /* first part */
+ /* we should not get cqe1 after cqe1 on same rq */
+ if(rq->cqe_firstpart != NULL) {
+ device_printf(sc->dev, "Got cqe1 after cqe1 \n");
+ goto exit_rq_handler_lro;
+ }
+ rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
+ RING_GET(cq->ring, 1);
+ }else if(cqe->cqe_type == 0x2) { /* second part */
+ cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
+ if(cqe2->error != 0) {
+ rq->rx_stats.rxcp_err++;
+ if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
+ }
+ /* We should not get cqe2 without cqe1 */
+ if(rq->cqe_firstpart == NULL) {
+ device_printf(sc->dev, "Got cqe2 without cqe1 \n");
+ goto exit_rq_handler_lro;
+ }
+ oce_rx_lro(rq, (struct nic_hwlro_singleton_cqe *)rq->cqe_firstpart, cqe2);
+
+ rq->rx_stats.rx_compl++;
+ rq->cqe_firstpart->valid = 0;
+ cqe2->valid = 0;
+ rq->cqe_firstpart = NULL;
+
+ RING_GET(cq->ring, 1);
+ num_cqes += 2;
+ if (num_cqes >= (IS_XE201(sc) ? 8 : oce_max_rsp_handled))
+ break;
+ }
+
+ bus_dmamap_sync(cq->ring->dma.tag,cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
+ }
+ oce_check_rx_bufs(sc, num_cqes, rq);
+exit_rq_handler_lro:
+ UNLOCK(&rq->rx_lock);
+ return 0;
+}
/* Handle the Completion Queue for receive */
uint16_t
@@ -1619,23 +2063,26 @@ oce_rq_handler(void *arg)
struct oce_cq *cq = rq->cq;
POCE_SOFTC sc = rq->parent;
struct oce_nic_rx_cqe *cqe;
- int num_cqes = 0, rq_buffers_used = 0;
-
+ int num_cqes = 0;
+ if(rq->islro) {
+ oce_rq_handler_lro(arg);
+ return 0;
+ }
+ LOCK(&rq->rx_lock);
bus_dmamap_sync(cq->ring->dma.tag,
cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
while (cqe->u0.dw[2]) {
DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
- RING_GET(rq->ring, 1);
if (cqe->u0.s.error == 0) {
- oce_rx(rq, cqe->u0.s.frag_index, cqe);
+ oce_rx(rq, cqe);
} else {
rq->rx_stats.rxcp_err++;
if_inc_counter(sc->ifp, IFCOUNTER_IERRORS, 1);
/* Post L3/L4 errors to stack.*/
- oce_rx(rq, cqe->u0.s.frag_index, cqe);
+ oce_rx(rq, cqe);
}
rq->rx_stats.rx_compl++;
cqe->u0.dw[2] = 0;
@@ -1657,17 +2104,12 @@ oce_rq_handler(void *arg)
}
#if defined(INET6) || defined(INET)
- if (IF_LRO_ENABLED(sc))
- oce_rx_flush_lro(rq);
+ if (IF_LRO_ENABLED(sc))
+ oce_rx_flush_lro(rq);
#endif
-
- if (num_cqes) {
- oce_arm_cq(sc, cq->cq_id, num_cqes, FALSE);
- rq_buffers_used = OCE_RQ_PACKET_ARRAY_SIZE - rq->pending;
- if (rq_buffers_used > 1)
- oce_alloc_rx_bufs(rq, (rq_buffers_used - 1));
- }
+ oce_check_rx_bufs(sc, num_cqes, rq);
+ UNLOCK(&rq->rx_lock);
return 0;
}
@@ -1896,45 +2338,53 @@ oce_eqd_set_periodic(POCE_SOFTC sc)
struct oce_eq *eqo;
uint64_t now = 0, delta;
int eqd, i, num = 0;
- uint32_t ips = 0;
- int tps;
+ uint32_t tx_reqs = 0, rxpkts = 0, pps;
+ struct oce_wq *wq;
+ struct oce_rq *rq;
+
+ #define ticks_to_msecs(t) (1000 * (t) / hz)
for (i = 0 ; i < sc->neqs; i++) {
eqo = sc->eq[i];
aic = &sc->aic_obj[i];
/* When setting the static eq delay from the user space */
if (!aic->enable) {
+ if (aic->ticks)
+ aic->ticks = 0;
eqd = aic->et_eqd;
goto modify_eqd;
}
+ rq = sc->rq[i];
+ rxpkts = rq->rx_stats.rx_pkts;
+ wq = sc->wq[i];
+ tx_reqs = wq->tx_stats.tx_reqs;
now = ticks;
- /* Over flow check */
- if ((now < aic->ticks) || (eqo->intr < aic->intr_prev))
- goto done;
-
- delta = now - aic->ticks;
- tps = delta/hz;
-
- /* Interrupt rate based on elapsed ticks */
- if(tps)
- ips = (uint32_t)(eqo->intr - aic->intr_prev) / tps;
+ if (!aic->ticks || now < aic->ticks ||
+ rxpkts < aic->prev_rxpkts || tx_reqs < aic->prev_txreqs) {
+ aic->prev_rxpkts = rxpkts;
+ aic->prev_txreqs = tx_reqs;
+ aic->ticks = now;
+ continue;
+ }
- if (ips > INTR_RATE_HWM)
- eqd = aic->cur_eqd + 20;
- else if (ips < INTR_RATE_LWM)
- eqd = aic->cur_eqd / 2;
- else
- goto done;
+ delta = ticks_to_msecs(now - aic->ticks);
- if (eqd < 10)
+ pps = (((uint32_t)(rxpkts - aic->prev_rxpkts) * 1000) / delta) +
+ (((uint32_t)(tx_reqs - aic->prev_txreqs) * 1000) / delta);
+ eqd = (pps / 15000) << 2;
+ if (eqd < 8)
eqd = 0;
/* Make sure that the eq delay is in the known range */
eqd = min(eqd, aic->max_eqd);
eqd = max(eqd, aic->min_eqd);
+ aic->prev_rxpkts = rxpkts;
+ aic->prev_txreqs = tx_reqs;
+ aic->ticks = now;
+
modify_eqd:
if (eqd != aic->cur_eqd) {
set_eqd[num].delay_multiplier = (eqd * 65)/100;
@@ -1942,14 +2392,16 @@ modify_eqd:
aic->cur_eqd = eqd;
num++;
}
-done:
- aic->intr_prev = eqo->intr;
- aic->ticks = now;
}
/* Is there atleast one eq that needs to be modified? */
- if(num)
- oce_mbox_eqd_modify_periodic(sc, set_eqd, num);
+ for(i = 0; i < num; i += 8) {
+ if((num - i) >=8 )
+ oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], 8);
+ else
+ oce_mbox_eqd_modify_periodic(sc, &set_eqd[i], (num - i));
+ }
+
}
static void oce_detect_hw_error(POCE_SOFTC sc)
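The replacement AIC scheme above derives the EQ delay from the combined rx+tx packet rate rather than the interrupt rate: roughly 4 microseconds for every 15,000 packets per second, zeroed below 8 and clamped to [min_eqd, max_eqd], with the firmware updates batched eight EQs per mailbox call. A worked example with illustrative numbers:

	uint32_t pps = 120000;		/* combined rx + tx packets per second */
	int eqd = (pps / 15000) << 2;	/* 8 << 2 = 32 us                      */
	if (eqd < 8)
		eqd = 0;		/* nearly idle: no moderation          */
	eqd = min(eqd, aic->max_eqd);	/* clamp to the supported range        */
	eqd = max(eqd, aic->min_eqd);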
@@ -2037,6 +2489,44 @@ oce_local_timer(void *arg)
callout_reset(&sc->timer, hz, oce_local_timer, sc);
}
+static void
+oce_tx_compl_clean(POCE_SOFTC sc)
+{
+ struct oce_wq *wq;
+ int i = 0, timeo = 0, num_wqes = 0;
+ int pending_txqs = sc->nwqs;
+
+ /* Poll for completions until the HW has been silent for 10ms,
+ * a HW error is detected, or no completions remain outstanding
+ */
+ do {
+ pending_txqs = sc->nwqs;
+
+ for_all_wq_queues(sc, wq, i) {
+ num_wqes = oce_wq_handler(wq);
+
+ if(num_wqes)
+ timeo = 0;
+
+ if(!wq->ring->num_used)
+ pending_txqs--;
+ }
+
+ if (pending_txqs == 0 || ++timeo > 10 || sc->hw_error)
+ break;
+
+ DELAY(1000);
+ } while (TRUE);
+
+ for_all_wq_queues(sc, wq, i) {
+ while(wq->ring->num_used) {
+ LOCK(&wq->tx_compl_lock);
+ oce_process_tx_completion(wq);
+ UNLOCK(&wq->tx_compl_lock);
+ }
+ }
+
+}
/* NOTE : This should only be called holding
* DEVICE_LOCK.
@@ -2044,28 +2534,14 @@ oce_local_timer(void *arg)
static void
oce_if_deactivate(POCE_SOFTC sc)
{
- int i, mtime = 0;
- int wait_req = 0;
+ int i;
struct oce_rq *rq;
struct oce_wq *wq;
struct oce_eq *eq;
sc->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
- /*Wait for max of 400ms for TX completions to be done */
- while (mtime < 400) {
- wait_req = 0;
- for_all_wq_queues(sc, wq, i) {
- if (wq->ring->num_used) {
- wait_req = 1;
- DELAY(1);
- break;
- }
- }
- mtime += 1;
- if (!wait_req)
- break;
- }
+ oce_tx_compl_clean(sc);
/* Stop intrs and finish any bottom halves pending */
oce_hw_intr_disable(sc);
@@ -2152,6 +2628,50 @@ process_link_state(POCE_SOFTC sc, struct oce_async_cqe_link_state *acqe)
}
+static void oce_async_grp5_osbmc_process(POCE_SOFTC sc,
+ struct oce_async_evt_grp5_os2bmc *evt)
+{
+ DW_SWAP(evt, sizeof(struct oce_async_evt_grp5_os2bmc));
+ if (evt->u.s.mgmt_enable)
+ sc->flags |= OCE_FLAGS_OS2BMC;
+ else
+ return;
+
+ sc->bmc_filt_mask = evt->u.s.arp_filter;
+ sc->bmc_filt_mask |= (evt->u.s.dhcp_client_filt << 1);
+ sc->bmc_filt_mask |= (evt->u.s.dhcp_server_filt << 2);
+ sc->bmc_filt_mask |= (evt->u.s.net_bios_filt << 3);
+ sc->bmc_filt_mask |= (evt->u.s.bcast_filt << 4);
+ sc->bmc_filt_mask |= (evt->u.s.ipv6_nbr_filt << 5);
+ sc->bmc_filt_mask |= (evt->u.s.ipv6_ra_filt << 6);
+ sc->bmc_filt_mask |= (evt->u.s.ipv6_ras_filt << 7);
+ sc->bmc_filt_mask |= (evt->u.s.mcast_filt << 8);
+}
+
+
+static void oce_process_grp5_events(POCE_SOFTC sc, struct oce_mq_cqe *cqe)
+{
+ struct oce_async_event_grp5_pvid_state *gcqe;
+ struct oce_async_evt_grp5_os2bmc *bmccqe;
+
+ switch (cqe->u0.s.async_type) {
+ case ASYNC_EVENT_PVID_STATE:
+ /* GRP5 PVID */
+ gcqe = (struct oce_async_event_grp5_pvid_state *)cqe;
+ if (gcqe->enabled)
+ sc->pvid = gcqe->tag & VLAN_VID_MASK;
+ else
+ sc->pvid = 0;
+ break;
+ case ASYNC_EVENT_OS2BMC:
+ bmccqe = (struct oce_async_evt_grp5_os2bmc *)cqe;
+ oce_async_grp5_osbmc_process(sc, bmccqe);
+ break;
+ default:
+ break;
+ }
+}
+
/* Handle the Completion Queue for the Mailbox/Async notifications */
uint16_t
oce_mq_handler(void *arg)
@@ -2162,7 +2682,6 @@ oce_mq_handler(void *arg)
int num_cqes = 0, evt_type = 0, optype = 0;
struct oce_mq_cqe *cqe;
struct oce_async_cqe_link_state *acqe;
- struct oce_async_event_grp5_pvid_state *gcqe;
struct oce_async_event_qnq *dbgcqe;
@@ -2179,21 +2698,11 @@ oce_mq_handler(void *arg)
/* Link status evt */
acqe = (struct oce_async_cqe_link_state *)cqe;
process_link_state(sc, acqe);
- } else if ((evt_type == ASYNC_EVENT_GRP5) &&
- (optype == ASYNC_EVENT_PVID_STATE)) {
- /* GRP5 PVID */
- gcqe =
- (struct oce_async_event_grp5_pvid_state *)cqe;
- if (gcqe->enabled)
- sc->pvid = gcqe->tag & VLAN_VID_MASK;
- else
- sc->pvid = 0;
-
- }
- else if(evt_type == ASYNC_EVENT_CODE_DEBUG &&
- optype == ASYNC_EVENT_DEBUG_QNQ) {
- dbgcqe =
- (struct oce_async_event_qnq *)cqe;
+ } else if (evt_type == ASYNC_EVENT_GRP5) {
+ oce_process_grp5_events(sc, cqe);
+ } else if (evt_type == ASYNC_EVENT_CODE_DEBUG &&
+ optype == ASYNC_EVENT_DEBUG_QNQ) {
+ dbgcqe = (struct oce_async_event_qnq *)cqe;
if(dbgcqe->valid)
sc->qnqid = dbgcqe->vlan_tag;
sc->qnq_debug_event = TRUE;
@@ -2303,7 +2812,8 @@ oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
if(sc->pvid) {
if(!vlan_tag)
vlan_tag = sc->pvid;
- *complete = FALSE;
+ if (complete)
+ *complete = FALSE;
}
if(vlan_tag) {
@@ -2312,7 +2822,9 @@ oce_insert_vlan_tag(POCE_SOFTC sc, struct mbuf *m, boolean_t *complete)
if(sc->qnqid) {
m = ether_vlanencap(m, sc->qnqid);
- *complete = FALSE;
+
+ if (complete)
+ *complete = FALSE;
}
return m;
}
@@ -2353,3 +2865,129 @@ oce_get_config(POCE_SOFTC sc)
sc->max_vlans = MAX_VLANFILTER_SIZE;
}
}
+
+static void
+oce_rdma_close(void)
+{
+ if (oce_rdma_if != NULL) {
+ oce_rdma_if = NULL;
+ }
+}
+
+static void
+oce_get_mac_addr(POCE_SOFTC sc, uint8_t *macaddr)
+{
+ memcpy(macaddr, sc->macaddr.mac_addr, 6);
+}
+
+int
+oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if)
+{
+ POCE_SOFTC sc;
+ struct oce_dev_info di;
+ int i;
+
+ if ((rdma_info == NULL) || (rdma_if == NULL)) {
+ return -EINVAL;
+ }
+
+ if ((rdma_info->size != OCE_RDMA_INFO_SIZE) ||
+ (rdma_if->size != OCE_RDMA_IF_SIZE)) {
+ return -ENXIO;
+ }
+
+ rdma_info->close = oce_rdma_close;
+ rdma_info->mbox_post = oce_mbox_post;
+ rdma_info->common_req_hdr_init = mbx_common_req_hdr_init;
+ rdma_info->get_mac_addr = oce_get_mac_addr;
+
+ oce_rdma_if = rdma_if;
+
+ sc = softc_head;
+ while (sc != NULL) {
+ if (oce_rdma_if->announce != NULL) {
+ memset(&di, 0, sizeof(di));
+ di.dev = sc->dev;
+ di.softc = sc;
+ di.ifp = sc->ifp;
+ di.db_bhandle = sc->db_bhandle;
+ di.db_btag = sc->db_btag;
+ di.db_page_size = 4096;
+ if (sc->flags & OCE_FLAGS_USING_MSIX) {
+ di.intr_mode = OCE_INTERRUPT_MODE_MSIX;
+ } else if (sc->flags & OCE_FLAGS_USING_MSI) {
+ di.intr_mode = OCE_INTERRUPT_MODE_MSI;
+ } else {
+ di.intr_mode = OCE_INTERRUPT_MODE_INTX;
+ }
+ di.dev_family = OCE_GEN2_FAMILY; // fixme: must detect skyhawk
+ if (di.intr_mode != OCE_INTERRUPT_MODE_INTX) {
+ di.msix.num_vectors = sc->intr_count + sc->roce_intr_count;
+ di.msix.start_vector = sc->intr_count;
+ for (i=0; i<di.msix.num_vectors; i++) {
+ di.msix.vector_list[i] = sc->intrs[i].vector;
+ }
+ } else {
+ }
+ memcpy(di.mac_addr, sc->macaddr.mac_addr, 6);
+ di.vendor_id = pci_get_vendor(sc->dev);
+ di.dev_id = pci_get_device(sc->dev);
+
+ if (sc->rdma_flags & OCE_RDMA_FLAG_SUPPORTED) {
+ di.flags |= OCE_RDMA_INFO_RDMA_SUPPORTED;
+ }
+
+ rdma_if->announce(&di);
+ sc = sc->next;
+ }
+ }
+
+ return 0;
+}
+
+static void
+oce_read_env_variables( POCE_SOFTC sc )
+{
+ char *value = NULL;
+ int rc = 0;
+
+ /* read if user wants to enable hwlro or swlro */
+ //value = getenv("oce_enable_hwlro");
+ if(value && IS_SH(sc)) {
+ sc->enable_hwlro = strtol(value, NULL, 10);
+ if(sc->enable_hwlro) {
+ rc = oce_mbox_nic_query_lro_capabilities(sc, NULL, NULL);
+ if(rc) {
+ device_printf(sc->dev, "no hardware lro support\n");
+ device_printf(sc->dev, "software lro enabled\n");
+ sc->enable_hwlro = 0;
+ }else {
+ device_printf(sc->dev, "hardware lro enabled\n");
+ oce_max_rsp_handled = 32;
+ }
+ }else {
+ device_printf(sc->dev, "software lro enabled\n");
+ }
+ }else {
+ sc->enable_hwlro = 0;
+ }
+
+ /* read mbuf size */
+ //value = getenv("oce_rq_buf_size");
+ if(value && IS_SH(sc)) {
+ oce_rq_buf_size = strtol(value, NULL, 10);
+ switch(oce_rq_buf_size) {
+ case 2048:
+ case 4096:
+ case 9216:
+ case 16384:
+ break;
+
+ default:
+ device_printf(sc->dev, " Supported oce_rq_buf_size values are 2K, 4K, 9K, 16K \n");
+ oce_rq_buf_size = 2048;
+ }
+ }
+
+ return;
+}
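Note that both getenv() lookups above are commented out, so enable_hwlro stays 0 and oce_rq_buf_size keeps its 2048 default until the knobs are wired up. A hedged sketch of one way to do that, reusing the TUNABLE_INT pattern this file already applies to hw.oce.max_rsp_handled (the tunable names here are assumptions, not part of this commit):

	/* hypothetical loader tunables; names are illustrative only */
	static int oce_hwlro_knob = 0;
	TUNABLE_INT("hw.oce.enable_hwlro", &oce_hwlro_knob);
	TUNABLE_INT("hw.oce.rq_buf_size", &oce_rq_buf_size);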
diff --git a/sys/dev/oce/oce_if.h b/sys/dev/oce/oce_if.h
index 99707e496783..d6c1a0a454d4 100644
--- a/sys/dev/oce/oce_if.h
+++ b/sys/dev/oce/oce_if.h
@@ -85,13 +85,14 @@
#include <netinet/tcp.h>
#include <netinet/sctp.h>
#include <netinet/tcp_lro.h>
+#include <netinet/icmp6.h>
#include <machine/bus.h>
#include "oce_hw.h"
/* OCE device driver module component revision information */
-#define COMPONENT_REVISION "10.0.664.0"
+#define COMPONENT_REVISION "11.0.50.0"
/* OCE devices supported by this driver */
#define PCI_VENDOR_EMULEX 0x10df /* Emulex */
@@ -142,7 +143,6 @@ extern int mp_ncpus; /* system's total active cpu cores */
#define OCE_DEFAULT_WQ_EQD 16
#define OCE_MAX_PACKET_Q 16
-#define OCE_RQ_BUF_SIZE 2048
#define OCE_LSO_MAX_SIZE (64 * 1024)
#define LONG_TIMEOUT 30
#define OCE_MAX_JUMBO_FRAME_SIZE 9018
@@ -150,11 +150,15 @@ extern int mp_ncpus; /* system's total active cpu cores */
ETHER_VLAN_ENCAP_LEN - \
ETHER_HDR_LEN)
+#define OCE_RDMA_VECTORS 2
+
#define OCE_MAX_TX_ELEMENTS 29
#define OCE_MAX_TX_DESC 1024
#define OCE_MAX_TX_SIZE 65535
+#define OCE_MAX_TSO_SIZE (65535 - ETHER_HDR_LEN)
#define OCE_MAX_RX_SIZE 4096
#define OCE_MAX_RQ_POSTS 255
+#define OCE_HWLRO_MAX_RQ_POSTS 64
#define OCE_DEFAULT_PROMISCUOUS 0
@@ -503,7 +507,7 @@ struct oce_drv_stats {
#define INTR_RATE_LWM 10000
#define OCE_MAX_EQD 128u
-#define OCE_MIN_EQD 50u
+#define OCE_MIN_EQD 0u
struct oce_set_eqd {
uint32_t eq_id;
@@ -518,7 +522,8 @@ struct oce_aic_obj { /* Adaptive interrupt coalescing (AIC) info */
uint32_t cur_eqd; /* in usecs */
uint32_t et_eqd; /* configured value when aic is off */
uint64_t ticks;
- uint64_t intr_prev;
+ uint64_t prev_rxpkts;
+ uint64_t prev_txreqs;
};
#define MAX_LOCK_DESC_LEN 32
@@ -609,7 +614,8 @@ struct oce_eq {
enum cq_len {
CQ_LEN_256 = 256,
CQ_LEN_512 = 512,
- CQ_LEN_1024 = 1024
+ CQ_LEN_1024 = 1024,
+ CQ_LEN_2048 = 2048
};
struct cq_config {
@@ -685,6 +691,7 @@ struct oce_tx_queue_stats {
struct oce_wq {
OCE_LOCK tx_lock;
+ OCE_LOCK tx_compl_lock;
void *parent;
oce_ring_buffer_t *ring;
struct oce_cq *cq;
@@ -730,6 +737,7 @@ struct oce_rx_queue_stats {
uint32_t rx_frags;
uint32_t prev_rx_frags;
uint32_t rx_fps;
+ uint32_t rx_drops_no_frags; /* HW has no fetched frags */
};
@@ -744,8 +752,6 @@ struct oce_rq {
void *pad1;
bus_dma_tag_t tag;
struct oce_packet_desc pckts[OCE_RQ_PACKET_ARRAY_SIZE];
- uint32_t packets_in;
- uint32_t packets_out;
uint32_t pending;
#ifdef notdef
struct mbuf *head;
@@ -757,6 +763,8 @@ struct oce_rq {
struct oce_rx_queue_stats rx_stats;
struct lro_ctrl lro;
int lro_pkts_queued;
+ int islro;
+ struct nic_hwlro_cqe_part1 *cqe_firstpart;
};
@@ -781,6 +789,7 @@ struct link_status {
#define OCE_FLAGS_XE201 0x00000400
#define OCE_FLAGS_BE2 0x00000800
#define OCE_FLAGS_SH 0x00001000
+#define OCE_FLAGS_OS2BMC 0x00002000
#define OCE_DEV_BE2_CFG_BAR 1
#define OCE_DEV_CFG_BAR 0
@@ -815,6 +824,7 @@ typedef struct oce_softc {
OCE_INTR_INFO intrs[OCE_MAX_EQ];
int intr_count;
+ int roce_intr_count;
struct ifnet *ifp;
@@ -824,6 +834,7 @@ typedef struct oce_softc {
uint8_t duplex;
uint32_t qos_link_speed;
uint32_t speed;
+ uint32_t enable_hwlro;
char fw_version[32];
struct mac_address_format macaddr;
@@ -881,9 +892,15 @@ typedef struct oce_softc {
uint16_t qnqid;
uint32_t pvid;
uint32_t max_vlans;
+ uint32_t bmc_filt_mask;
+
+ void *rdma_context;
+ uint32_t rdma_flags;
+ struct oce_softc *next;
} OCE_SOFTC, *POCE_SOFTC;
+#define OCE_RDMA_FLAG_SUPPORTED 0x00000001
/**************************************************
@@ -933,7 +950,7 @@ typedef struct oce_softc {
: (bus_space_write_1((sc)->devcfg_btag, \
(sc)->devcfg_bhandle,o,v)))
-
+void oce_rx_flush_lro(struct oce_rq *rq);
/***********************************************************
* DMA memory functions
***********************************************************/
@@ -983,6 +1000,9 @@ uint32_t oce_page_list(oce_ring_buffer_t *ring, struct phys_addr *pa_list);
* cleanup functions
***********************************************************/
void oce_stop_rx(POCE_SOFTC sc);
+void oce_discard_rx_comp(struct oce_rq *rq, int num_frags);
+void oce_rx_cq_clean(struct oce_rq *rq);
+void oce_rx_cq_clean_hwlro(struct oce_rq *rq);
void oce_intr_free(POCE_SOFTC sc);
void oce_free_posted_rxbuf(struct oce_rq *rq);
#if defined(INET6) || defined(INET)
@@ -1015,7 +1035,8 @@ int oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable);
int oce_set_common_iface_rx_filter(POCE_SOFTC sc, POCE_DMA_MEM sgl);
int oce_get_link_status(POCE_SOFTC sc, struct link_status *link);
int oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
-int oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
+int oce_mbox_get_nic_stats_v1(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
+int oce_mbox_get_nic_stats_v2(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem);
int oce_mbox_get_pport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
uint32_t reset_stats);
int oce_mbox_get_vport_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem,
@@ -1086,10 +1107,16 @@ int oce_refresh_nic_stats(POCE_SOFTC sc);
int oce_stats_init(POCE_SOFTC sc);
void oce_stats_free(POCE_SOFTC sc);
+/* hw lro functions */
+int oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags);
+int oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable);
+int oce_mbox_create_rq_v2(struct oce_rq *rq);
+
/* Capabilities */
#define OCE_MODCAP_RSS 1
#define OCE_MAX_RSP_HANDLED 64
extern uint32_t oce_max_rsp_handled; /* max responses */
+extern uint32_t oce_rq_buf_size;
#define OCE_MAC_LOOPBACK 0x0
#define OCE_PHY_LOOPBACK 0x1
@@ -1159,3 +1186,80 @@ static inline int MPU_EP_SEMAPHORE(POCE_SOFTC sc)
#define IS_QNQ_OR_UMC(sc) ((sc->pvid && (sc->function_mode & FNM_UMC_MODE ))\
|| (sc->qnqid && (sc->function_mode & FNM_FLEX10_MODE)))
+struct oce_rdma_info;
+extern struct oce_rdma_if *oce_rdma_if;
+
+
+
+/* OS2BMC related */
+
+#define DHCP_CLIENT_PORT 68
+#define DHCP_SERVER_PORT 67
+#define NET_BIOS_PORT1 137
+#define NET_BIOS_PORT2 138
+#define DHCPV6_RAS_PORT 547
+
+#define BMC_FILT_BROADCAST_ARP ((uint32_t)(1))
+#define BMC_FILT_BROADCAST_DHCP_CLIENT ((uint32_t)(1 << 1))
+#define BMC_FILT_BROADCAST_DHCP_SERVER ((uint32_t)(1 << 2))
+#define BMC_FILT_BROADCAST_NET_BIOS ((uint32_t)(1 << 3))
+#define BMC_FILT_BROADCAST ((uint32_t)(1 << 4))
+#define BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER ((uint32_t)(1 << 5))
+#define BMC_FILT_MULTICAST_IPV6_RA ((uint32_t)(1 << 6))
+#define BMC_FILT_MULTICAST_IPV6_RAS ((uint32_t)(1 << 7))
+#define BMC_FILT_MULTICAST ((uint32_t)(1 << 8))
+
+#define ND_ROUTER_ADVERT 134
+#define ND_NEIGHBOR_ADVERT 136
+
+#define is_mc_allowed_on_bmc(sc, eh) \
+ (!is_multicast_filt_enabled(sc) && \
+ ETHER_IS_MULTICAST(eh->ether_dhost) && \
+ !ETHER_IS_BROADCAST(eh->ether_dhost))
+
+#define is_bc_allowed_on_bmc(sc, eh) \
+ (!is_broadcast_filt_enabled(sc) && \
+ ETHER_IS_BROADCAST(eh->ether_dhost))
+
+#define is_arp_allowed_on_bmc(sc, et) \
+ (is_arp(et) && is_arp_filt_enabled(sc))
+
+#define is_arp(et) (et == ETHERTYPE_ARP)
+
+#define is_arp_filt_enabled(sc) \
+ (sc->bmc_filt_mask & (BMC_FILT_BROADCAST_ARP))
+
+#define is_dhcp_client_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_CLIENT)
+
+#define is_dhcp_srvr_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_BROADCAST_DHCP_SERVER)
+
+#define is_nbios_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_BROADCAST_NET_BIOS)
+
+#define is_ipv6_na_filt_enabled(sc) \
+ (sc->bmc_filt_mask & \
+ BMC_FILT_MULTICAST_IPV6_NEIGH_ADVER)
+
+#define is_ipv6_ra_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RA)
+
+#define is_ipv6_ras_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_MULTICAST_IPV6_RAS)
+
+#define is_broadcast_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_BROADCAST)
+
+#define is_multicast_filt_enabled(sc) \
+ (sc->bmc_filt_mask & BMC_FILT_MULTICAST)
+
+#define is_os2bmc_enabled(sc) (sc->flags & OCE_FLAGS_OS2BMC)
+
+#define LRO_FLAGS_HASH_MODE 0x00000001
+#define LRO_FLAGS_RSS_MODE 0x00000004
+#define LRO_FLAGS_CLSC_IPV4 0x00000010
+#define LRO_FLAGS_CLSC_IPV6 0x00000020
+#define NIC_RQ_FLAGS_RSS 0x0001
+#define NIC_RQ_FLAGS_LRO 0x0020
+
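
[Editor's note] The OS2BMC predicates above are pure bit tests against sc->bmc_filt_mask. A minimal user-space sketch of the broadcast decision, with the softc reduced to that single field (the sketch_sc struct, bc_allowed(), and main() are invented for illustration, not driver code):

#include <stdint.h>
#include <stdio.h>

#define BMC_FILT_BROADCAST_ARP	((uint32_t)(1))
#define BMC_FILT_BROADCAST	((uint32_t)(1 << 4))

struct sketch_sc { uint32_t bmc_filt_mask; };

/* Mirrors is_bc_allowed_on_bmc(): broadcast frames are copied to the
 * BMC only while the broadcast filter bit is clear. */
static int bc_allowed(const struct sketch_sc *sc)
{
	return !(sc->bmc_filt_mask & BMC_FILT_BROADCAST);
}

int main(void)
{
	struct sketch_sc sc = { .bmc_filt_mask = BMC_FILT_BROADCAST_ARP };

	printf("broadcast to BMC: %d\n", bc_allowed(&sc));	/* 1: only ARP is filtered */
	sc.bmc_filt_mask |= BMC_FILT_BROADCAST;
	printf("broadcast to BMC: %d\n", bc_allowed(&sc));	/* 0 */
	return 0;
}
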
diff --git a/sys/dev/oce/oce_mbox.c b/sys/dev/oce/oce_mbox.c
index cb2ae81a013d..3c303b5ae17f 100644
--- a/sys/dev/oce/oce_mbox.c
+++ b/sys/dev/oce/oce_mbox.c
@@ -495,6 +495,10 @@ oce_get_fw_config(POCE_SOFTC sc)
sc->asic_revision = HOST_32(fwcmd->params.rsp.asic_revision);
sc->port_id = HOST_32(fwcmd->params.rsp.port_id);
sc->function_mode = HOST_32(fwcmd->params.rsp.function_mode);
+ if ((sc->function_mode & (ULP_NIC_MODE | ULP_RDMA_MODE)) ==
+ (ULP_NIC_MODE | ULP_RDMA_MODE)) {
+ sc->rdma_flags = OCE_RDMA_FLAG_SUPPORTED;
+ }
sc->function_caps = HOST_32(fwcmd->params.rsp.function_caps);
if (fwcmd->params.rsp.ulp[0].ulp_mode & ULP_NIC_MODE) {
@@ -767,7 +771,7 @@ oce_rss_itbl_init(POCE_SOFTC sc, struct mbx_config_nic_rss *fwcmd)
/* fill log2 value indicating the size of the CPU table */
if (rc == 0)
- fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(i));
+ fwcmd->params.req.cpu_tbl_sz_log2 = LE_16(OCE_LOG2(INDIRECTION_TABLE_ENTRIES));
return rc;
}
@@ -808,9 +812,15 @@ oce_config_nic_rss(POCE_SOFTC sc, uint32_t if_id, uint16_t enable_rss)
RSS_ENABLE_TCP_IPV4 |
RSS_ENABLE_IPV6 |
RSS_ENABLE_TCP_IPV6);
- fwcmd->params.req.flush = OCE_FLUSH;
+
+ if(!sc->enable_hwlro)
+ fwcmd->params.req.flush = OCE_FLUSH;
+ else
+ fwcmd->params.req.flush = 0;
+
fwcmd->params.req.if_id = LE_32(if_id);
+ srandom(arc4random()); /* random entropy seed */
read_random(fwcmd->params.req.hash, sizeof(fwcmd->params.req.hash));
rc = oce_rss_itbl_init(sc, fwcmd);
@@ -864,7 +874,7 @@ oce_rxf_set_promiscuous(POCE_SOFTC sc, uint8_t enable)
req->iface_flags = MBX_RX_IFACE_FLAGS_PROMISCUOUS;
if (enable & 0x02)
- req->iface_flags = MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
+ req->iface_flags |= MBX_RX_IFACE_FLAGS_VLAN_PROMISCUOUS;
req->if_id = sc->if_id;
@@ -968,105 +978,59 @@ error:
}
-
-int
-oce_mbox_get_nic_stats_v0(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
-{
- struct oce_mbx mbx;
- struct mbx_get_nic_stats_v0 *fwcmd;
- int rc = 0;
-
- bzero(&mbx, sizeof(struct oce_mbx));
-
- fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v0);
- bzero(fwcmd, sizeof(struct mbx_get_nic_stats_v0));
-
- mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
- MBX_SUBSYSTEM_NIC,
- NIC_GET_STATS,
- MBX_TIMEOUT_SEC,
- sizeof(struct mbx_get_nic_stats_v0),
- OCE_MBX_VER_V0);
-
- mbx.u0.s.embedded = 0;
- mbx.u0.s.sge_count = 1;
-
- oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
-
- mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
- mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
- mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats_v0);
-
- mbx.payload_length = sizeof(struct mbx_get_nic_stats_v0);
-
- DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
-
- rc = oce_mbox_post(sc, &mbx, NULL);
-
- oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
-
- if (!rc)
- rc = fwcmd->hdr.u0.rsp.status;
- if (rc)
- device_printf(sc->dev,
- "%s failed - cmd status: %d addi status: %d\n",
- __FUNCTION__, rc,
- fwcmd->hdr.u0.rsp.additional_status);
- return rc;
-}
-
-
-
/**
* @brief Function to get NIC statistics
- * @param sc software handle to the device
- * @param *stats pointer to where to store statistics
- * @param reset_stats resets statistics of set
- * @returns 0 on success, EIO on failure
- * @note command depricated in Lancer
+ * @param sc software handle to the device
+ * @param *stats pointer to where to store statistics
+ * @param reset_stats	resets statistics if set
+ * @returns		0 on success, EIO on failure
+ * @note		command deprecated in Lancer
*/
-int
-oce_mbox_get_nic_stats(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem)
-{
- struct oce_mbx mbx;
- struct mbx_get_nic_stats *fwcmd;
- int rc = 0;
-
- bzero(&mbx, sizeof(struct oce_mbx));
- fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats);
- bzero(fwcmd, sizeof(struct mbx_get_nic_stats));
-
- mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
- MBX_SUBSYSTEM_NIC,
- NIC_GET_STATS,
- MBX_TIMEOUT_SEC,
- sizeof(struct mbx_get_nic_stats),
- OCE_MBX_VER_V1);
-
-
- mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */
- mbx.u0.s.sge_count = 1; /* using scatter gather instead */
-
- oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE);
- mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr);
- mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr);
- mbx.payload.u0.u1.sgl[0].length = sizeof(struct mbx_get_nic_stats);
-
- mbx.payload_length = sizeof(struct mbx_get_nic_stats);
- DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ);
-
- rc = oce_mbox_post(sc, &mbx, NULL);
- oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE);
- if (!rc)
- rc = fwcmd->hdr.u0.rsp.status;
- if (rc)
- device_printf(sc->dev,
- "%s failed - cmd status: %d addi status: %d\n",
- __FUNCTION__, rc,
- fwcmd->hdr.u0.rsp.additional_status);
- return rc;
+#define OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, version) \
+int \
+oce_mbox_get_nic_stats_v##version(POCE_SOFTC sc, POCE_DMA_MEM pstats_dma_mem) \
+{ \
+ struct oce_mbx mbx; \
+ struct mbx_get_nic_stats_v##version *fwcmd; \
+ int rc = 0; \
+ \
+ bzero(&mbx, sizeof(struct oce_mbx)); \
+ fwcmd = OCE_DMAPTR(pstats_dma_mem, struct mbx_get_nic_stats_v##version); \
+ bzero(fwcmd, sizeof(*fwcmd)); \
+ \
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0, \
+ MBX_SUBSYSTEM_NIC, \
+ NIC_GET_STATS, \
+ MBX_TIMEOUT_SEC, \
+ sizeof(*fwcmd), \
+ OCE_MBX_VER_V##version); \
+ \
+ mbx.u0.s.embedded = 0; /* stats too large for embedded mbx rsp */ \
+ mbx.u0.s.sge_count = 1; /* using scatter gather instead */ \
+ \
+ oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_PREWRITE); \
+ mbx.payload.u0.u1.sgl[0].pa_lo = ADDR_LO(pstats_dma_mem->paddr); \
+ mbx.payload.u0.u1.sgl[0].pa_hi = ADDR_HI(pstats_dma_mem->paddr); \
+ mbx.payload.u0.u1.sgl[0].length = sizeof(*fwcmd); \
+ mbx.payload_length = sizeof(*fwcmd); \
+ DW_SWAP(u32ptr(&mbx), mbx.payload_length + OCE_BMBX_RHDR_SZ); \
+ \
+ rc = oce_mbox_post(sc, &mbx, NULL); \
+ oce_dma_sync(pstats_dma_mem, BUS_DMASYNC_POSTWRITE); \
+ if (!rc) \
+ rc = fwcmd->hdr.u0.rsp.status; \
+ if (rc) \
+ device_printf(sc->dev, \
+ "%s failed - cmd status: %d addi status: %d\n", \
+ __FUNCTION__, rc, \
+ fwcmd->hdr.u0.rsp.additional_status); \
+ return rc; \
}
+OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 0);
+OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 1);
+OCE_MBOX_GET_NIC_STATS(sc, pstats_dma_mem, 2);
+
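[Editor's note] The macro above uses ## token pasting so a single body stamps out oce_mbox_get_nic_stats_v0/_v1/_v2, each with its version-specific request structure and OCE_MBX_VER_V* constant. A stand-alone sketch of the same generation technique (names invented for the sketch):

#include <stdio.h>

#define DEFINE_GET_STATS(version)				\
static int get_stats_v##version(void)				\
{								\
	printf("NIC_GET_STATS, mailbox version %d\n", version);	\
	return 0;						\
}

DEFINE_GET_STATS(0)
DEFINE_GET_STATS(1)
DEFINE_GET_STATS(2)

int main(void)
{
	return get_stats_v0() | get_stats_v1() | get_stats_v2();
}
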
/**
* @brief Function to get pport (physical port) statistics
@@ -2220,3 +2184,149 @@ error:
return rc;
}
+
+/* hw lro functions */
+
+int
+oce_mbox_nic_query_lro_capabilities(POCE_SOFTC sc, uint32_t *lro_rq_cnt, uint32_t *lro_flags)
+{
+ struct oce_mbx mbx;
+ struct mbx_nic_query_lro_capabilities *fwcmd;
+ int rc = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_nic_query_lro_capabilities *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+				0x20, MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_nic_query_lro_capabilities),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_nic_query_lro_capabilities);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,
+ "%s failed - cmd status: %d addi status: %d\n",
+ __FUNCTION__, rc,
+ fwcmd->hdr.u0.rsp.additional_status);
+
+ return rc;
+ }
+ if(lro_flags)
+ *lro_flags = HOST_32(fwcmd->params.rsp.lro_flags);
+
+ if(lro_rq_cnt)
+ *lro_rq_cnt = HOST_16(fwcmd->params.rsp.lro_rq_cnt);
+
+ return rc;
+}
+
+int
+oce_mbox_nic_set_iface_lro_config(POCE_SOFTC sc, int enable)
+{
+ struct oce_mbx mbx;
+ struct mbx_nic_set_iface_lro_config *fwcmd;
+ int rc = 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_nic_set_iface_lro_config *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+				0x26, MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_nic_set_iface_lro_config),
+ OCE_MBX_VER_V0);
+
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_nic_set_iface_lro_config);
+
+ fwcmd->params.req.iface_id = sc->if_id;
+ fwcmd->params.req.lro_flags = 0;
+
+ if(enable) {
+ fwcmd->params.req.lro_flags = LRO_FLAGS_HASH_MODE | LRO_FLAGS_RSS_MODE;
+ fwcmd->params.req.lro_flags |= LRO_FLAGS_CLSC_IPV4 | LRO_FLAGS_CLSC_IPV6;
+
+ fwcmd->params.req.max_clsc_byte_cnt = 64*1024; /* min = 2974, max = 0xfa59 */
+ fwcmd->params.req.max_clsc_seg_cnt = 43; /* min = 2, max = 64 */
+ fwcmd->params.req.max_clsc_usec_delay = 18; /* min = 1, max = 256 */
+ fwcmd->params.req.min_clsc_frame_byte_cnt = 0; /* min = 1, max = 9014 */
+ }
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,
+ "%s failed - cmd status: %d addi status: %d\n",
+ __FUNCTION__, rc,
+ fwcmd->hdr.u0.rsp.additional_status);
+
+ return rc;
+ }
+ return rc;
+}
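
[Editor's note] A quick stand-alone check of the tunables above against the commented firmware limits; note that 64*1024 (65536) exceeds the stated max_clsc_byte_cnt ceiling of 0xfa59 (64089), so the value presumably relies on firmware-side clamping (an assumption, not documented in this hunk):

#include <stdio.h>

int main(void)
{
	unsigned req = 64 * 1024;
	unsigned fw_max = 0xfa59;	/* limit from the comment above */

	printf("requested %u, firmware max %u, effective %u\n",
	    req, fw_max, req > fw_max ? fw_max : req);
	return 0;
}
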
+
+int
+oce_mbox_create_rq_v2(struct oce_rq *rq)
+{
+ struct oce_mbx mbx;
+ struct mbx_create_nic_rq_v2 *fwcmd;
+ POCE_SOFTC sc = rq->parent;
+ int rc = 0, num_pages = 0;
+
+ if (rq->qstate == QCREATED)
+ return 0;
+
+ bzero(&mbx, sizeof(struct oce_mbx));
+
+ fwcmd = (struct mbx_create_nic_rq_v2 *)&mbx.payload;
+ mbx_common_req_hdr_init(&fwcmd->hdr, 0, 0,
+ MBX_SUBSYSTEM_NIC,
+ 0x08, MBX_TIMEOUT_SEC,
+ sizeof(struct mbx_create_nic_rq_v2),
+ OCE_MBX_VER_V2);
+
+ /* oce_page_list will also prepare pages */
+ num_pages = oce_page_list(rq->ring, &fwcmd->params.req.pages[0]);
+
+ fwcmd->params.req.cq_id = rq->cq->cq_id;
+ fwcmd->params.req.frag_size = rq->cfg.frag_size/2048;
+ fwcmd->params.req.num_pages = num_pages;
+
+ fwcmd->params.req.if_id = sc->if_id;
+
+ fwcmd->params.req.max_frame_size = rq->cfg.mtu;
+ fwcmd->params.req.page_size = 1;
+ if(rq->cfg.is_rss_queue) {
+ fwcmd->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
+ }else {
+ device_printf(sc->dev,
+ "non rss lro queue should not be created \n");
+ goto error;
+ }
+ mbx.u0.s.embedded = 1;
+ mbx.payload_length = sizeof(struct mbx_create_nic_rq_v2);
+
+ rc = oce_mbox_post(sc, &mbx, NULL);
+ if (!rc)
+ rc = fwcmd->hdr.u0.rsp.status;
+ if (rc) {
+ device_printf(sc->dev,
+ "%s failed - cmd status: %d addi status: %d\n",
+ __FUNCTION__, rc,
+ fwcmd->hdr.u0.rsp.additional_status);
+ goto error;
+ }
+ rq->rq_id = HOST_16(fwcmd->params.rsp.rq_id);
+ rq->rss_cpuid = fwcmd->params.rsp.rss_cpuid;
+
+error:
+ return rc;
+}
+
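[Editor's note] The frag_size field above is programmed as rq->cfg.frag_size/2048, which reads as an encoding in 2 KB units (an inference from the division; the firmware spec is not shown here). A tiny sketch of the conversion:

#include <assert.h>

int main(void)
{
	unsigned frag_bytes = 4096;		/* hypothetical rq->cfg.frag_size */
	unsigned encoded = frag_bytes / 2048;	/* 2 KB units */

	assert(encoded == 2);
	assert(encoded * 2048 == frag_bytes);	/* exact only for 2 KB multiples */
	return 0;
}
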
diff --git a/sys/dev/oce/oce_queue.c b/sys/dev/oce/oce_queue.c
index 308c16d54846..e14621e63a3a 100644
--- a/sys/dev/oce/oce_queue.c
+++ b/sys/dev/oce/oce_queue.c
@@ -66,7 +66,7 @@ static struct oce_mq *oce_mq_create(POCE_SOFTC sc,
struct oce_eq *eq, uint32_t q_len);
static void oce_mq_free(struct oce_mq *mq);
static int oce_destroy_q(POCE_SOFTC sc, struct oce_mbx
- *mbx, size_t req_size, enum qtype qtype);
+ *mbx, size_t req_size, enum qtype qtype, int version);
struct oce_cq *oce_cq_create(POCE_SOFTC sc,
struct oce_eq *eq,
uint32_t q_len,
@@ -120,9 +120,10 @@ oce_queue_init_all(POCE_SOFTC sc)
aic->min_eqd = OCE_MIN_EQD;
aic->et_eqd = OCE_MIN_EQD;
aic->enable = TRUE;
+
+ sc->eq[vector] = oce_eq_create(sc, sc->enable_hwlro ? EQ_LEN_2048 : EQ_LEN_1024,
+						      EQE_SIZE_4, 0, vector);
- sc->eq[vector] = oce_eq_create(sc, EQ_LEN_1024, EQE_SIZE_4,
- 0, vector);
if (!sc->eq[vector])
goto error;
}
@@ -169,6 +170,10 @@ oce_queue_release_all(POCE_SOFTC sc)
struct oce_rq *rq;
struct oce_eq *eq;
+ /* before deleting lro queues, we have to disable hwlro */
+ if(sc->enable_hwlro)
+ oce_mbox_nic_set_iface_lro_config(sc, 0);
+
for_all_rq_queues(sc, rq, i) {
if (rq) {
oce_rq_del(sc->rq[i]);
@@ -254,6 +259,7 @@ oce_wq *oce_wq_init(POCE_SOFTC sc, uint32_t q_len, uint32_t wq_type)
LOCK_CREATE(&wq->tx_lock, "TX_lock");
+ LOCK_CREATE(&wq->tx_compl_lock, "WQ_HANDLER_LOCK");
#if __FreeBSD_version >= 800000
/* Allocate buf ring for multiqueue*/
@@ -304,6 +310,7 @@ oce_wq_free(struct oce_wq *wq)
buf_ring_free(wq->br, M_DEVBUF);
LOCK_DESTROY(&wq->tx_lock);
+ LOCK_DESTROY(&wq->tx_compl_lock);
free(wq, M_DEVBUF);
}
@@ -374,7 +381,7 @@ oce_wq_del(struct oce_wq *wq)
fwcmd = (struct mbx_delete_nic_wq *)&mbx.payload;
fwcmd->params.req.wq_id = wq->wq_id;
(void)oce_destroy_q(sc, &mbx,
- sizeof(struct mbx_delete_nic_wq), QTYPE_WQ);
+ sizeof(struct mbx_delete_nic_wq), QTYPE_WQ, 0);
wq->qstate = QDELETED;
}
@@ -422,20 +429,17 @@ oce_rq *oce_rq_init(POCE_SOFTC sc,
rq->cfg.eqd = 0;
rq->lro_pkts_queued = 0;
rq->cfg.is_rss_queue = rss;
- rq->packets_in = 0;
- rq->packets_out = 0;
rq->pending = 0;
rq->parent = (void *)sc;
rc = bus_dma_tag_create(bus_get_dma_tag(sc->dev),
- 1, 0,
- BUS_SPACE_MAXADDR,
- BUS_SPACE_MAXADDR,
- NULL, NULL,
- OCE_MAX_RX_SIZE,
- 1, PAGE_SIZE, 0, NULL, NULL, &rq->tag);
-
+ 1, 0,
+ BUS_SPACE_MAXADDR,
+ BUS_SPACE_MAXADDR,
+ NULL, NULL,
+ oce_rq_buf_size,
+ 1, oce_rq_buf_size, 0, NULL, NULL, &rq->tag);
if (rc)
goto free_rq;
@@ -512,10 +516,10 @@ oce_rq_create(struct oce_rq *rq, uint32_t if_id, struct oce_eq *eq)
POCE_SOFTC sc = rq->parent;
struct oce_cq *cq;
- cq = oce_cq_create(sc,
- eq,
- CQ_LEN_1024,
- sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
+ cq = oce_cq_create(sc, eq,
+ sc->enable_hwlro ? CQ_LEN_2048 : CQ_LEN_1024,
+ sizeof(struct oce_nic_rx_cqe), 0, 1, 0, 3);
+
if (!cq)
return ENXIO;
@@ -548,14 +552,20 @@ oce_rq_del(struct oce_rq *rq)
POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
struct oce_mbx mbx;
struct mbx_delete_nic_rq *fwcmd;
+ struct mbx_delete_nic_rq_v1 *fwcmd1;
if (rq->qstate == QCREATED) {
bzero(&mbx, sizeof(mbx));
-
- fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
- fwcmd->params.req.rq_id = rq->rq_id;
- (void)oce_destroy_q(sc, &mbx,
- sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+ if(!rq->islro) {
+ fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+ fwcmd->params.req.rq_id = rq->rq_id;
+ (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
+ }else {
+ fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
+ fwcmd1->params.req.rq_id = rq->rq_id;
+ fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
+ (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
+ }
rq->qstate = QDELETED;
}
@@ -632,7 +642,7 @@ oce_eq_del(struct oce_eq *eq)
fwcmd = (struct mbx_destroy_common_eq *)&mbx.payload;
fwcmd->params.req.id = eq->eq_id;
(void)oce_destroy_q(sc, &mbx,
- sizeof(struct mbx_destroy_common_eq), QTYPE_EQ);
+ sizeof(struct mbx_destroy_common_eq), QTYPE_EQ, 0);
}
if (eq->ring != NULL) {
@@ -783,7 +793,7 @@ oce_mq_free(struct oce_mq *mq)
fwcmd->params.req.id = mq->mq_id;
(void) oce_destroy_q(sc, &mbx,
sizeof (struct mbx_destroy_common_mq),
- QTYPE_MQ);
+ QTYPE_MQ, 0);
}
mq->qstate = QDELETED;
}
@@ -810,7 +820,7 @@ oce_mq_free(struct oce_mq *mq)
*/
static int
oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
- enum qtype qtype)
+ enum qtype qtype, int version)
{
struct mbx_hdr *hdr = (struct mbx_hdr *)&mbx->payload;
int opcode;
@@ -844,7 +854,7 @@ oce_destroy_q(POCE_SOFTC sc, struct oce_mbx *mbx, size_t req_size,
mbx_common_req_hdr_init(hdr, 0, 0, subsys,
opcode, MBX_TIMEOUT_SEC, req_size,
- OCE_MBX_VER_V0);
+ version);
mbx->u0.s.embedded = 1;
mbx->payload_length = (uint32_t) req_size;
@@ -932,7 +942,7 @@ oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
fwcmd = (struct mbx_destroy_common_cq *)&mbx.payload;
fwcmd->params.req.id = cq->cq_id;
(void)oce_destroy_q(sc, &mbx,
- sizeof(struct mbx_destroy_common_cq), QTYPE_CQ);
+ sizeof(struct mbx_destroy_common_cq), QTYPE_CQ, 0);
/*NOW destroy the ring */
oce_destroy_ring_buffer(sc, cq->ring);
cq->ring = NULL;
@@ -951,12 +961,17 @@ oce_cq_del(POCE_SOFTC sc, struct oce_cq *cq)
int
oce_start_rq(struct oce_rq *rq)
{
+ POCE_SOFTC sc = (POCE_SOFTC) rq->parent;
int rc;
- rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len);
+ if(sc->enable_hwlro)
+ rc = oce_alloc_rx_bufs(rq, 960);
+ else
+ rc = oce_alloc_rx_bufs(rq, rq->cfg.q_len - 1);
if (rc == 0)
oce_arm_cq(rq->parent, rq->cq->cq_id, 0, TRUE);
+
return rc;
}
@@ -1148,7 +1163,7 @@ oce_free_posted_rxbuf(struct oce_rq *rq)
while (rq->pending) {
- pd = &rq->pckts[rq->packets_out];
+ pd = &rq->pckts[rq->ring->cidx];
bus_dmamap_sync(rq->tag, pd->map, BUS_DMASYNC_POSTWRITE);
bus_dmamap_unload(rq->tag, pd->map);
if (pd->mbuf != NULL) {
@@ -1156,44 +1171,179 @@ oce_free_posted_rxbuf(struct oce_rq *rq)
pd->mbuf = NULL;
}
- if ((rq->packets_out + 1) == OCE_RQ_PACKET_ARRAY_SIZE)
- rq->packets_out = 0;
- else
- rq->packets_out++;
-
+		RING_GET(rq->ring, 1);
rq->pending--;
}
}
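
[Editor's note] The cleanup above walks posted buffers by the ring's consumer index instead of the removed packets_out counter; RING_GET advances cidx modulo the ring size. A stand-alone sketch of that consumer-index arithmetic (the sketch_ring struct and field names are invented):

#include <assert.h>

struct sketch_ring { unsigned cidx, num_items; };

static void ring_get(struct sketch_ring *r, unsigned n)
{
	r->cidx = (r->cidx + n) % r->num_items;
}

int main(void)
{
	struct sketch_ring r = { .cidx = 1022, .num_items = 1024 };

	ring_get(&r, 1);
	ring_get(&r, 1);
	assert(r.cidx == 0);	/* wraps at the ring boundary */
	return 0;
}
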
void
-oce_stop_rx(POCE_SOFTC sc)
+oce_rx_cq_clean_hwlro(struct oce_rq *rq)
{
- struct oce_mbx mbx;
- struct mbx_delete_nic_rq *fwcmd;
- struct oce_rq *rq;
- int i = 0;
-
- for_all_rq_queues(sc, rq, i) {
- if (rq->qstate == QCREATED) {
- /* Delete rxq in firmware */
-
- bzero(&mbx, sizeof(mbx));
- fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
- fwcmd->params.req.rq_id = rq->rq_id;
-
- (void)oce_destroy_q(sc, &mbx,
- sizeof(struct mbx_delete_nic_rq), QTYPE_RQ);
+ struct oce_cq *cq = rq->cq;
+ POCE_SOFTC sc = rq->parent;
+ struct nic_hwlro_singleton_cqe *cqe;
+ struct nic_hwlro_cqe_part2 *cqe2;
+ int flush_wait = 0;
+ int flush_compl = 0;
+ int num_frags = 0;
+
+ for (;;) {
+		bus_dmamap_sync(cq->ring->dma.tag, cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct nic_hwlro_singleton_cqe);
+ if(cqe->valid) {
+ if(cqe->cqe_type == 0) { /* singleton cqe */
+ /* we should not get singleton cqe after cqe1 on same rq */
+ if(rq->cqe_firstpart != NULL) {
+ device_printf(sc->dev, "Got singleton cqe after cqe1 \n");
+ goto exit_rx_cq_clean_hwlro;
+ }
+ num_frags = cqe->pkt_size / rq->cfg.frag_size;
+ if(cqe->pkt_size % rq->cfg.frag_size)
+ num_frags++;
+ oce_discard_rx_comp(rq, num_frags);
+ /* Check if CQE is flush completion */
+ if(!cqe->pkt_size)
+ flush_compl = 1;
+ cqe->valid = 0;
+ RING_GET(cq->ring, 1);
+ }else if(cqe->cqe_type == 0x1) { /* first part */
+ /* we should not get cqe1 after cqe1 on same rq */
+ if(rq->cqe_firstpart != NULL) {
+ device_printf(sc->dev, "Got cqe1 after cqe1 \n");
+ goto exit_rx_cq_clean_hwlro;
+ }
+ rq->cqe_firstpart = (struct nic_hwlro_cqe_part1 *)cqe;
+ RING_GET(cq->ring, 1);
+ }else if(cqe->cqe_type == 0x2) { /* second part */
+ cqe2 = (struct nic_hwlro_cqe_part2 *)cqe;
+ /* We should not get cqe2 without cqe1 */
+ if(rq->cqe_firstpart == NULL) {
+ device_printf(sc->dev, "Got cqe2 without cqe1 \n");
+ goto exit_rx_cq_clean_hwlro;
+ }
+ num_frags = cqe2->coalesced_size / rq->cfg.frag_size;
+ if(cqe2->coalesced_size % rq->cfg.frag_size)
+ num_frags++;
+
+ /* Flush completion will always come in singleton CQE */
+ oce_discard_rx_comp(rq, num_frags);
+
+ rq->cqe_firstpart->valid = 0;
+ cqe2->valid = 0;
+ rq->cqe_firstpart = NULL;
+ RING_GET(cq->ring, 1);
+ }
+ oce_arm_cq(sc, cq->cq_id, 1, FALSE);
+ if(flush_compl)
+ break;
+ }else {
+ if (flush_wait++ > 100) {
+ device_printf(sc->dev, "did not receive hwlro flush compl\n");
+ break;
+ }
+ oce_arm_cq(sc, cq->cq_id, 0, TRUE);
+ DELAY(1000);
+ }
+ }
+
+ /* After cleanup, leave the CQ in unarmed state */
+ oce_arm_cq(sc, cq->cq_id, 0, FALSE);
+
+exit_rx_cq_clean_hwlro:
+ return;
+}
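
[Editor's note] num_frags above is a ceiling division (divide, then round up if there is a remainder), matching what FreeBSD's howmany() macro in sys/param.h computes. A self-contained check of the idiom:

#include <assert.h>

static unsigned frags_for(unsigned bytes, unsigned frag_size)
{
	unsigned n = bytes / frag_size;

	if (bytes % frag_size)
		n++;
	return n;
}

int main(void)
{
	assert(frags_for(2048, 2048) == 1);
	assert(frags_for(2049, 2048) == 2);
	assert(frags_for(1, 2048) == 1);
	assert(frags_for(0, 2048) == 0);	/* flush completion: zero size, zero frags */
	return 0;
}
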
- rq->qstate = QDELETED;
- DELAY(1);
+void
+oce_rx_cq_clean(struct oce_rq *rq)
+{
+ struct oce_nic_rx_cqe *cqe;
+ struct oce_cq *cq;
+ POCE_SOFTC sc;
+ int flush_wait = 0;
+ int flush_compl = 0;
+ sc = rq->parent;
+ cq = rq->cq;
+
+ for (;;) {
+ bus_dmamap_sync(cq->ring->dma.tag,
+ cq->ring->dma.map, BUS_DMASYNC_POSTWRITE);
+ cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_nic_rx_cqe);
+ if(RQ_CQE_VALID(cqe)) {
+ DW_SWAP((uint32_t *) cqe, sizeof(oce_rq_cqe));
+ oce_discard_rx_comp(rq, cqe->u0.s.num_fragments);
+ /* Check if CQE is flush completion */
+			if ((cqe->u0.s.num_fragments == 0) && (cqe->u0.s.pkt_size == 0) && (cqe->u0.s.error == 0))
+ flush_compl = 1;
+
+ RQ_CQE_INVALIDATE(cqe);
+ RING_GET(cq->ring, 1);
+#if defined(INET6) || defined(INET)
+ if (IF_LRO_ENABLED(sc))
+ oce_rx_flush_lro(rq);
+#endif
+ oce_arm_cq(sc, cq->cq_id, 1, FALSE);
+ if(flush_compl)
+ break;
+ }else {
+ if (flush_wait++ > 100) {
+ device_printf(sc->dev, "did not receive flush compl\n");
+ break;
+ }
+ oce_arm_cq(sc, cq->cq_id, 0, TRUE);
+ DELAY(1000);
+ }
+ }
+
+ /* After cleanup, leave the CQ in unarmed state */
+ oce_arm_cq(sc, cq->cq_id, 0, FALSE);
+}
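
[Editor's note] Both drain loops above are bounded polls: up to 100 passes with DELAY(1000) between them, i.e. roughly a 100 ms budget for the flush completion before giving up. A generic user-space sketch of the pattern (usleep stands in for DELAY; the callback and countdown are invented):

#include <stdio.h>
#include <unistd.h>

static int poll_until(int (*done)(void *), void *arg, int tries,
    useconds_t delay_us)
{
	while (tries-- > 0) {
		if (done(arg))
			return 0;
		usleep(delay_us);
	}
	return -1;		/* timed out; the caller logs and bails out */
}

static int third_try(void *arg)
{
	return --*(int *)arg == 0;
}

int main(void)
{
	int countdown = 3;

	printf("poll result: %d\n",
	    poll_until(third_try, &countdown, 100, 1000));
	return 0;
}
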
- /* Free posted RX buffers that are not used */
- oce_free_posted_rxbuf(rq);
+void
+oce_stop_rx(POCE_SOFTC sc)
+{
+ struct oce_mbx mbx;
+ struct mbx_delete_nic_rq *fwcmd;
+ struct mbx_delete_nic_rq_v1 *fwcmd1;
+ struct oce_rq *rq;
+ int i = 0;
+
+	/* disable hwlro before deleting the queues */
+ if(sc->enable_hwlro)
+ oce_mbox_nic_set_iface_lro_config(sc, 0);
+
+ for_all_rq_queues(sc, rq, i) {
+ if (rq->qstate == QCREATED) {
+ /* Delete rxq in firmware */
+ LOCK(&rq->rx_lock);
+
+ bzero(&mbx, sizeof(mbx));
+ if(!rq->islro) {
+ fwcmd = (struct mbx_delete_nic_rq *)&mbx.payload;
+ fwcmd->params.req.rq_id = rq->rq_id;
+ (void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq), QTYPE_RQ, 0);
+ }else {
+ fwcmd1 = (struct mbx_delete_nic_rq_v1 *)&mbx.payload;
+ fwcmd1->params.req.rq_id = rq->rq_id;
+ fwcmd1->params.req.rq_flags = (NIC_RQ_FLAGS_RSS | NIC_RQ_FLAGS_LRO);
+
+				(void)oce_destroy_q(sc, &mbx, sizeof(struct mbx_delete_nic_rq_v1), QTYPE_RQ, 1);
+ }
+ rq->qstate = QDELETED;
+
+ DELAY(1000);
- }
- }
+ if(!rq->islro)
+ oce_rx_cq_clean(rq);
+ else
+ oce_rx_cq_clean_hwlro(rq);
+
+ /* Free posted RX buffers that are not used */
+ oce_free_posted_rxbuf(rq);
+ UNLOCK(&rq->rx_lock);
+ }
+ }
}
@@ -1207,16 +1357,28 @@ oce_start_rx(POCE_SOFTC sc)
for_all_rq_queues(sc, rq, i) {
if (rq->qstate == QCREATED)
continue;
- rc = oce_mbox_create_rq(rq);
+ if((i == 0) || (!sc->enable_hwlro)) {
+ rc = oce_mbox_create_rq(rq);
+ if (rc)
+ goto error;
+ rq->islro = 0;
+ }else {
+ rc = oce_mbox_create_rq_v2(rq);
+ if (rc)
+ goto error;
+ rq->islro = 1;
+ }
+ /* reset queue pointers */
+ rq->qstate = QCREATED;
+ rq->pending = 0;
+ rq->ring->cidx = 0;
+ rq->ring->pidx = 0;
+ }
+
+ if(sc->enable_hwlro) {
+ rc = oce_mbox_nic_set_iface_lro_config(sc, 1);
if (rc)
goto error;
- /* reset queue pointers */
- rq->qstate = QCREATED;
- rq->pending = 0;
- rq->ring->cidx = 0;
- rq->ring->pidx = 0;
- rq->packets_in = 0;
- rq->packets_out = 0;
}
DELAY(1);
@@ -1229,6 +1391,7 @@ oce_start_rx(POCE_SOFTC sc)
}
+ DELAY(1);
return rc;
error:
device_printf(sc->dev, "Start RX failed\n");
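
[Editor's note] With hardware LRO enabled, oce_start_rx above keeps RQ0 as a plain queue (created via oce_mbox_create_rq) and creates the remaining RSS queues through the v2 mailbox; the outcome is recorded in rq->islro. The policy reduced to a pure function (a sketch, not driver code):

#include <stdio.h>

static int islro(int qindex, int hwlro_enabled)
{
	return hwlro_enabled && qindex != 0;	/* RQ0 stays non-LRO */
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		printf("rq%d: islro=%d\n", i, islro(i, 1));
	return 0;
}
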
diff --git a/sys/dev/oce/oce_sysctl.c b/sys/dev/oce/oce_sysctl.c
index 61adf93ac69d..1fe4a636af74 100644
--- a/sys/dev/oce/oce_sysctl.c
+++ b/sys/dev/oce/oce_sysctl.c
@@ -43,6 +43,7 @@
static void copy_stats_to_sc_xe201(POCE_SOFTC sc);
static void copy_stats_to_sc_be3(POCE_SOFTC sc);
static void copy_stats_to_sc_be2(POCE_SOFTC sc);
+static void copy_stats_to_sc_sh(POCE_SOFTC sc);
static int oce_sysctl_loopback(SYSCTL_HANDLER_ARGS);
static int oce_sys_aic_enable(SYSCTL_HANDLER_ARGS);
static int oce_be3_fwupgrade(POCE_SOFTC sc, const struct firmware *fw);
@@ -182,6 +183,8 @@ oce_sys_aic_enable(SYSCTL_HANDLER_ARGS)
POCE_SOFTC sc = (struct oce_softc *)arg1;
struct oce_aic_obj *aic;
+ /* set current value for proper sysctl logging */
+ value = sc->aic_obj[0].enable;
status = sysctl_handle_int(oidp, &value, 0, req);
if (status || !req->newptr)
return status;
@@ -482,34 +485,34 @@ ret:
return rc;
}
-#define UFI_TYPE2 2
-#define UFI_TYPE3 3
-#define UFI_TYPE3R 10
-#define UFI_TYPE4 4
-#define UFI_TYPE4R 11
+#define UFI_TYPE2 2
+#define UFI_TYPE3 3
+#define UFI_TYPE3R 10
+#define UFI_TYPE4 4
+#define UFI_TYPE4R 11
static int oce_get_ufi_type(POCE_SOFTC sc,
- const struct flash_file_hdr *fhdr)
+ const struct flash_file_hdr *fhdr)
{
- if (fhdr == NULL)
- goto be_get_ufi_exit;
-
- if (IS_SH(sc) && fhdr->build[0] == '4') {
- if (fhdr->asic_type_rev >= 0x10)
- return UFI_TYPE4R;
- else
- return UFI_TYPE4;
- } else if (IS_BE3(sc) && fhdr->build[0] == '3') {
- if (fhdr->asic_type_rev == 0x10)
- return UFI_TYPE3R;
- else
- return UFI_TYPE3;
- } else if (IS_BE2(sc) && fhdr->build[0] == '2')
- return UFI_TYPE2;
+ if (fhdr == NULL)
+ goto be_get_ufi_exit;
+
+ if (IS_SH(sc) && fhdr->build[0] == '4') {
+ if (fhdr->asic_type_rev >= 0x10)
+ return UFI_TYPE4R;
+ else
+ return UFI_TYPE4;
+ } else if (IS_BE3(sc) && fhdr->build[0] == '3') {
+ if (fhdr->asic_type_rev == 0x10)
+ return UFI_TYPE3R;
+ else
+ return UFI_TYPE3;
+ } else if (IS_BE2(sc) && fhdr->build[0] == '2')
+ return UFI_TYPE2;
be_get_ufi_exit:
- device_printf(sc->dev,
- "UFI and Interface are not compatible for flashing\n");
- return -1;
+ device_printf(sc->dev,
+ "UFI and Interface are not compatible for flashing\n");
+ return -1;
}
@@ -777,7 +780,11 @@ oce_add_stats_sysctls_be3(POCE_SOFTC sc,
SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rxcp_err",
CTLFLAG_RD, &sc->rq[i]->rx_stats.rxcp_err, 0,
"Received Completion Errors");
-
+ if(IS_SH(sc)) {
+ SYSCTL_ADD_UINT(ctx, queue_stats_list, OID_AUTO, "rx_drops_no_frags",
+ CTLFLAG_RD, &sc->rq[i]->rx_stats.rx_drops_no_frags, 0,
+ "num of packet drops due to no fragments");
+ }
}
rx_stats_node = SYSCTL_ADD_NODE(ctx,
@@ -1372,10 +1379,10 @@ copy_stats_to_sc_be3(POCE_SOFTC sc)
struct oce_pmem_stats *pmem;
struct oce_rxf_stats_v1 *rxf_stats;
struct oce_port_rxf_stats_v1 *port_stats;
- struct mbx_get_nic_stats *nic_mbx;
+ struct mbx_get_nic_stats_v1 *nic_mbx;
uint32_t port = sc->port_id;
- nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats);
+ nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v1);
pmem = &nic_mbx->params.rsp.stats.pmem;
rxf_stats = &nic_mbx->params.rsp.stats.rxf;
port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
@@ -1429,18 +1436,91 @@ copy_stats_to_sc_be3(POCE_SOFTC sc)
adapter_stats->eth_red_drops = pmem->eth_red_drops;
}
+static void
+copy_stats_to_sc_sh(POCE_SOFTC sc)
+{
+ struct oce_be_stats *adapter_stats;
+ struct oce_pmem_stats *pmem;
+ struct oce_rxf_stats_v2 *rxf_stats;
+ struct oce_port_rxf_stats_v2 *port_stats;
+ struct mbx_get_nic_stats_v2 *nic_mbx;
+ struct oce_erx_stats_v2 *erx_stats;
+ uint32_t port = sc->port_id;
+
+ nic_mbx = OCE_DMAPTR(&sc->stats_mem, struct mbx_get_nic_stats_v2);
+ pmem = &nic_mbx->params.rsp.stats.pmem;
+ rxf_stats = &nic_mbx->params.rsp.stats.rxf;
+ erx_stats = &nic_mbx->params.rsp.stats.erx;
+ port_stats = &nic_mbx->params.rsp.stats.rxf.port[port];
+
+ adapter_stats = &sc->oce_stats_info.u0.be;
+
+ /* Update stats */
+ adapter_stats->pmem_fifo_overflow_drop =
+ port_stats->pmem_fifo_overflow_drop;
+ adapter_stats->rx_priority_pause_frames =
+ port_stats->rx_priority_pause_frames;
+ adapter_stats->rx_pause_frames = port_stats->rx_pause_frames;
+ adapter_stats->rx_crc_errors = port_stats->rx_crc_errors;
+ adapter_stats->rx_control_frames = port_stats->rx_control_frames;
+ adapter_stats->rx_in_range_errors = port_stats->rx_in_range_errors;
+ adapter_stats->rx_frame_too_long = port_stats->rx_frame_too_long;
+ adapter_stats->rx_dropped_runt = port_stats->rx_dropped_runt;
+ adapter_stats->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
+ adapter_stats->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
+ adapter_stats->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
+ adapter_stats->rx_dropped_tcp_length =
+ port_stats->rx_dropped_tcp_length;
+ adapter_stats->rx_dropped_too_small = port_stats->rx_dropped_too_small;
+ adapter_stats->rx_dropped_too_short = port_stats->rx_dropped_too_short;
+ adapter_stats->rx_out_range_errors = port_stats->rx_out_range_errors;
+ adapter_stats->rx_dropped_header_too_small =
+ port_stats->rx_dropped_header_too_small;
+ adapter_stats->rx_input_fifo_overflow_drop =
+ port_stats->rx_input_fifo_overflow_drop;
+ adapter_stats->rx_address_match_errors =
+ port_stats->rx_address_match_errors;
+ adapter_stats->rx_alignment_symbol_errors =
+ port_stats->rx_alignment_symbol_errors;
+ adapter_stats->rxpp_fifo_overflow_drop =
+ port_stats->rxpp_fifo_overflow_drop;
+ adapter_stats->tx_pauseframes = port_stats->tx_pauseframes;
+ adapter_stats->tx_controlframes = port_stats->tx_controlframes;
+ adapter_stats->jabber_events = port_stats->jabber_events;
+
+ adapter_stats->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
+ adapter_stats->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
+ adapter_stats->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
+ adapter_stats->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
+ adapter_stats->forwarded_packets = rxf_stats->forwarded_packets;
+ adapter_stats->rx_drops_mtu = rxf_stats->rx_drops_mtu;
+ adapter_stats->rx_drops_no_tpre_descr =
+ rxf_stats->rx_drops_no_tpre_descr;
+ adapter_stats->rx_drops_too_many_frags =
+ rxf_stats->rx_drops_too_many_frags;
+
+ adapter_stats->eth_red_drops = pmem->eth_red_drops;
+
+ /* populate erx stats */
+ for (int i = 0; i < sc->nrqs; i++)
+ sc->rq[i]->rx_stats.rx_drops_no_frags = erx_stats->rx_drops_no_fragments[sc->rq[i]->rq_id];
+}
+
+
int
oce_stats_init(POCE_SOFTC sc)
{
- int rc = 0, sz;
-
- if (IS_BE(sc) || IS_SH(sc)) {
- if (sc->flags & OCE_FLAGS_BE2)
- sz = sizeof(struct mbx_get_nic_stats_v0);
- else
- sz = sizeof(struct mbx_get_nic_stats);
- } else
+ int rc = 0, sz = 0;
+
+
+ if( IS_BE2(sc) )
+ sz = sizeof(struct mbx_get_nic_stats_v0);
+ else if( IS_BE3(sc) )
+ sz = sizeof(struct mbx_get_nic_stats_v1);
+ else if( IS_SH(sc))
+ sz = sizeof(struct mbx_get_nic_stats_v2);
+ else if( IS_XE201(sc) )
sz = sizeof(struct mbx_get_pport_stats);
rc = oce_dma_alloc(sc, sz, &sc->stats_mem, 0);
@@ -1463,23 +1543,24 @@ oce_refresh_nic_stats(POCE_SOFTC sc)
{
int rc = 0, reset = 0;
- if (IS_BE(sc) || IS_SH(sc)) {
- if (sc->flags & OCE_FLAGS_BE2) {
- rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
- if (!rc)
- copy_stats_to_sc_be2(sc);
- } else {
- rc = oce_mbox_get_nic_stats(sc, &sc->stats_mem);
- if (!rc)
- copy_stats_to_sc_be3(sc);
- }
-
- } else {
+ if( IS_BE2(sc) ) {
+ rc = oce_mbox_get_nic_stats_v0(sc, &sc->stats_mem);
+ if (!rc)
+ copy_stats_to_sc_be2(sc);
+ }else if( IS_BE3(sc) ) {
+ rc = oce_mbox_get_nic_stats_v1(sc, &sc->stats_mem);
+ if (!rc)
+ copy_stats_to_sc_be3(sc);
+ }else if( IS_SH(sc)) {
+ rc = oce_mbox_get_nic_stats_v2(sc, &sc->stats_mem);
+ if (!rc)
+ copy_stats_to_sc_sh(sc);
+ }else if( IS_XE201(sc) ){
rc = oce_mbox_get_pport_stats(sc, &sc->stats_mem, reset);
if (!rc)
copy_stats_to_sc_xe201(sc);
}
-
+
return rc;
}
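
[Editor's note] The refresh path above now selects the stats command by ASIC family: BE2 uses v0, BE3 v1, SH (Skyhawk) v2, and XE201 (Lancer) the pport command. A table-driven sketch of the same mapping (the enum and helper are invented for the sketch):

#include <stdio.h>

enum asic { ASIC_BE2, ASIC_BE3, ASIC_SH, ASIC_XE201 };

static const char *stats_cmd(enum asic a)
{
	switch (a) {
	case ASIC_BE2:	 return "NIC_GET_STATS v0";
	case ASIC_BE3:	 return "NIC_GET_STATS v1";
	case ASIC_SH:	 return "NIC_GET_STATS v2";
	case ASIC_XE201: return "GET_PPORT_STATS";
	}
	return "unknown";
}

int main(void)
{
	for (int a = ASIC_BE2; a <= ASIC_XE201; a++)
		printf("%s\n", stats_cmd((enum asic)a));
	return 0;
}
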
diff --git a/sys/dev/oce/oce_user.h b/sys/dev/oce/oce_user.h
new file mode 100644
index 000000000000..ae1f96d5d317
--- /dev/null
+++ b/sys/dev/oce/oce_user.h
@@ -0,0 +1,121 @@
+/*-
+ * Copyright (C) 2013 Emulex
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * 3. Neither the name of the Emulex Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Contact Information:
+ * freebsd-drivers@emulex.com
+ *
+ * Emulex
+ * 3333 Susan Street
+ * Costa Mesa, CA 92626
+ */
+
+/* $FreeBSD$ */
+
+struct oce_mbx;
+struct oce_softc;
+struct mbx_hdr;
+
+enum oce_interrupt_mode {
+ OCE_INTERRUPT_MODE_MSIX = 0,
+ OCE_INTERRUPT_MODE_INTX = 1,
+ OCE_INTERRUPT_MODE_MSI = 2,
+};
+
+#define MAX_ROCE_MSIX_VECTORS 16
+#define MIN_ROCE_MSIX_VECTORS 1
+#define ROCE_MSIX_VECTORS 2
+
+struct oce_dev_info {
+ device_t dev;
+ struct ifnet *ifp;
+ struct oce_softc *softc;
+
+ bus_space_handle_t db_bhandle;
+ bus_space_tag_t db_btag;
+ uint64_t unmapped_db;
+ uint32_t unmapped_db_len;
+ uint32_t db_page_size;
+ uint64_t dpp_unmapped_addr;
+ uint32_t dpp_unmapped_len;
+ uint8_t mac_addr[6];
+ uint32_t dev_family;
+ uint16_t vendor_id;
+ uint16_t dev_id;
+ enum oce_interrupt_mode intr_mode;
+ struct {
+ int num_vectors;
+ int start_vector;
+ uint32_t vector_list[MAX_ROCE_MSIX_VECTORS];
+ } msix;
+ uint32_t flags;
+#define OCE_RDMA_INFO_RDMA_SUPPORTED 0x00000001
+};
+
+
+#define OCE_GEN2_FAMILY 2
+
+#ifdef notdef
+struct oce_mbx_ctx {
+ struct oce_mbx *mbx;
+ void (*cb) (void *ctx);
+ void *cb_ctx;
+};
+#endif
+
+struct oce_mbx_ctx;
+
+typedef struct oce_rdma_info {
+ int size;
+ void (*close)(void);
+ int (*mbox_post)(struct oce_softc *sc,
+ struct oce_mbx *mbx,
+ struct oce_mbx_ctx *mbxctx);
+ void (*common_req_hdr_init)(struct mbx_hdr *hdr,
+ uint8_t dom,
+ uint8_t port,
+ uint8_t subsys,
+ uint8_t opcode,
+ uint32_t timeout,
+ uint32_t pyld_len,
+ uint8_t version);
+ void (*get_mac_addr)(struct oce_softc *sc,
+ uint8_t *macaddr);
+} OCE_RDMA_INFO, *POCE_RDMA_INFO;
+
+#define OCE_RDMA_INFO_SIZE (sizeof(OCE_RDMA_INFO))
+
+typedef struct oce_rdma_if {
+ int size;
+ int (*announce)(struct oce_dev_info *devinfo);
+} OCE_RDMA_IF, *POCE_RDMA_IF;
+
+#define OCE_RDMA_IF_SIZE (sizeof(OCE_RDMA_IF))
+
+int oce_register_rdma(POCE_RDMA_INFO rdma_info, POCE_RDMA_IF rdma_if);
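
[Editor's note] oce_register_rdma() pairs two tables that both begin with a size field, which lets either side reject a caller built against a different struct layout before touching any function pointer. A stand-alone sketch of that handshake (all names here are invented; only the size-check idea is taken from the header above):

#include <stdio.h>

struct sketch_if {
	int size;
	int (*announce)(const char *name);
};

static int register_if(struct sketch_if *sif)
{
	/* A size mismatch implies a different struct layout; refuse early. */
	if (sif == NULL || sif->size != (int)sizeof(*sif))
		return -1;
	return sif->announce("oce0");
}

static int demo_announce(const char *name)
{
	printf("announce %s\n", name);
	return 0;
}

int main(void)
{
	struct sketch_if sif = { (int)sizeof(sif), demo_announce };

	return register_if(&sif);
}
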