author    Dimitry Andric <dim@FreeBSD.org>  2016-08-16 20:19:05 +0000
committer Dimitry Andric <dim@FreeBSD.org>  2016-08-16 20:19:05 +0000
commit    27067774dce3388702a4cf744d7096c6fb71b688 (patch)
tree      56300949abd050b3cd2e23d29210d2a567bc8aec /sys/dev
parent    44be0a8ea517cbe7a9140bca20e1e93228ac0a04 (diff)
parent    915a263ea24ab051bb57674a4d6f5ffe4ef0d5b5 (diff)
download  src-27067774dce3388702a4cf744d7096c6fb71b688.tar.gz
          src-27067774dce3388702a4cf744d7096c6fb71b688.zip
Merge ^/head r303250 through r304235.
Notes:    svn path=/projects/clang390-import/; revision=304236
Diffstat (limited to 'sys/dev')
-rw-r--r--  sys/dev/aic7xxx/aic7xxx_osm.c | 2
-rw-r--r--  sys/dev/ath/ath_hal/ar5212/ar5212_attach.c | 4
-rw-r--r--  sys/dev/ath/ath_hal/ar5416/ar5416_attach.c | 4
-rw-r--r--  sys/dev/ath/if_athioctl.h | 4
-rw-r--r--  sys/dev/auxio/auxio.c | 2
-rw-r--r--  sys/dev/bktr/bktr_os.c | 18
-rw-r--r--  sys/dev/bktr/bktr_reg.h | 2
-rw-r--r--  sys/dev/bxe/bxe.c | 5
-rw-r--r--  sys/dev/cfe/cfe_api.c | 24
-rw-r--r--  sys/dev/cfe/cfe_api.h | 5
-rw-r--r--  sys/dev/cfe/cfe_api_int.h | 3
-rw-r--r--  sys/dev/cfe/cfe_error.h | 3
-rw-r--r--  sys/dev/cfe/cfe_ioctl.h | 123
-rw-r--r--  sys/dev/cxgbe/adapter.h | 10
-rw-r--r--  sys/dev/cxgbe/common/common.h | 2
-rw-r--r--  sys/dev/cxgbe/common/t4_hw.c | 107
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/cq.c | 2
-rw-r--r--  sys/dev/cxgbe/iw_cxgbe/qp.c | 4
-rw-r--r--  sys/dev/cxgbe/offload.h | 1
-rw-r--r--  sys/dev/cxgbe/t4_if.m | 10
-rw-r--r--  sys/dev/cxgbe/t4_iov.c | 16
-rw-r--r--  sys/dev/cxgbe/t4_main.c | 78
-rw-r--r--  sys/dev/cxgbe/t4_netmap.c | 18
-rw-r--r--  sys/dev/cxgbe/t4_sge.c | 49
-rw-r--r--  sys/dev/cxgbe/tom/t4_cpl_io.c | 485
-rw-r--r--  sys/dev/cxgbe/tom/t4_ddp.c | 39
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.c | 51
-rw-r--r--  sys/dev/cxgbe/tom/t4_tom.h | 15
-rw-r--r--  sys/dev/e1000/e1000_api.c | 4
-rw-r--r--  sys/dev/e1000/e1000_hw.h | 8
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.c | 17
-rw-r--r--  sys/dev/e1000/e1000_ich8lan.h | 2
-rw-r--r--  sys/dev/e1000/e1000_osdep.h | 2
-rw-r--r--  sys/dev/e1000/e1000_phy.c | 22
-rw-r--r--  sys/dev/e1000/if_em.c | 7
-rw-r--r--  sys/dev/e1000/if_em.h | 2
-rw-r--r--  sys/dev/e1000/if_igb.h | 2
-rw-r--r--  sys/dev/e1000/if_lem.h | 2
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch.c | 153
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_7240.c | 2
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_8316.c | 1
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_8327.c | 34
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_8327.h | 5
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_phy.c | 14
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitch_reg.c | 11
-rw-r--r--  sys/dev/etherswitch/arswitch/arswitchvar.h | 35
-rw-r--r--  sys/dev/etherswitch/etherswitch.h | 15
-rw-r--r--  sys/dev/etherswitch/ip17x/ip17x.c | 2
-rw-r--r--  sys/dev/fdt/fdt_common.c | 3
-rw-r--r--  sys/dev/fdt/fdt_common.h | 3
-rw-r--r--  sys/dev/filemon/filemon.c | 2
-rw-r--r--  sys/dev/gpio/gpioled.c | 5
-rw-r--r--  sys/dev/hwpmc/hwpmc_core.c | 2
-rw-r--r--  sys/dev/hyperv/include/hyperv.h | 32
-rw-r--r--  sys/dev/hyperv/include/hyperv_busdma.h | 12
-rw-r--r--  sys/dev/hyperv/include/vmbus.h | 80
-rw-r--r--  sys/dev/hyperv/include/vmbus_xact.h | 59
-rw-r--r--  sys/dev/hyperv/netvsc/hv_net_vsc.c | 601
-rw-r--r--  sys/dev/hyperv/netvsc/hv_net_vsc.h | 38
-rw-r--r--  sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 75
-rw-r--r--  sys/dev/hyperv/netvsc/hv_rndis.h | 4
-rw-r--r--  sys/dev/hyperv/netvsc/hv_rndis_filter.c | 204
-rw-r--r--  sys/dev/hyperv/netvsc/hv_rndis_filter.h | 3
-rw-r--r--  sys/dev/hyperv/netvsc/if_hnreg.h | 199
-rw-r--r--  sys/dev/hyperv/netvsc/if_hnvar.h | 104
-rw-r--r--  sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c | 296
-rw-r--r--  sys/dev/hyperv/utilities/hv_heartbeat.c | 12
-rw-r--r--  sys/dev/hyperv/utilities/hv_kvp.c | 7
-rw-r--r--  sys/dev/hyperv/utilities/hv_shutdown.c | 12
-rw-r--r--  sys/dev/hyperv/utilities/hv_timesync.c | 12
-rw-r--r--  sys/dev/hyperv/utilities/hv_util.c | 40
-rw-r--r--  sys/dev/hyperv/utilities/hv_util.h | 15
-rw-r--r--  sys/dev/hyperv/vmbus/hv_ring_buffer.c | 524
-rw-r--r--  sys/dev/hyperv/vmbus/hv_vmbus_priv.h | 85
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv.c | 23
-rw-r--r--  sys/dev/hyperv/vmbus/hyperv_reg.h | 1
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus.c | 369
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_br.c | 404
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_brvar.h | 104
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_chan.c | 99
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_chanvar.h | 20
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_et.c | 49
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_var.h | 50
-rw-r--r--  sys/dev/hyperv/vmbus/vmbus_xact.c | 313
-rw-r--r--  sys/dev/ioat/ioat.c | 34
-rw-r--r--  sys/dev/iwm/if_iwm.c | 1838
-rw-r--r--  sys/dev/iwm/if_iwm_led.c | 6
-rw-r--r--  sys/dev/iwm/if_iwm_led.h | 10
-rw-r--r--  sys/dev/iwm/if_iwm_mac_ctxt.c | 28
-rw-r--r--  sys/dev/iwm/if_iwm_pcie_trans.c | 53
-rw-r--r--  sys/dev/iwm/if_iwm_phy_ctxt.c | 8
-rw-r--r--  sys/dev/iwm/if_iwm_phy_db.c | 2
-rw-r--r--  sys/dev/iwm/if_iwm_power.c | 11
-rw-r--r--  sys/dev/iwm/if_iwm_scan.c | 581
-rw-r--r--  sys/dev/iwm/if_iwm_scan.h | 6
-rw-r--r--  sys/dev/iwm/if_iwm_time_event.c | 6
-rw-r--r--  sys/dev/iwm/if_iwm_util.c | 88
-rw-r--r--  sys/dev/iwm/if_iwm_util.h | 3
-rw-r--r--  sys/dev/iwm/if_iwmreg.h | 1367
-rw-r--r--  sys/dev/iwm/if_iwmvar.h | 45
-rw-r--r--  sys/dev/ixgb/if_ixgb.h | 2
-rw-r--r--  sys/dev/ixgb/if_ixgb_osdep.h | 2
-rw-r--r--  sys/dev/ixgbe/ixgbe.h | 2
-rw-r--r--  sys/dev/ixl/i40e_adminq.c | 37
-rw-r--r--  sys/dev/ixl/i40e_adminq.h | 1
-rw-r--r--  sys/dev/ixl/i40e_adminq_cmd.h | 192
-rw-r--r--  sys/dev/ixl/i40e_common.c | 507
-rw-r--r--  sys/dev/ixl/i40e_devids.h | 12
-rw-r--r--  sys/dev/ixl/i40e_nvm.c | 118
-rw-r--r--  sys/dev/ixl/i40e_osdep.c | 29
-rw-r--r--  sys/dev/ixl/i40e_osdep.h | 17
-rw-r--r--  sys/dev/ixl/i40e_prototype.h | 43
-rw-r--r--  sys/dev/ixl/i40e_register.h | 1962
-rw-r--r--  sys/dev/ixl/i40e_type.h | 164
-rw-r--r--  sys/dev/ixl/i40e_virtchnl.h | 45
-rw-r--r--  sys/dev/ixl/if_ixl.c | 7148
-rw-r--r--  sys/dev/ixl/if_ixlv.c | 309
-rw-r--r--  sys/dev/ixl/ixl.h | 161
-rw-r--r--  sys/dev/ixl/ixl_pf.h | 210
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.c | 1925
-rw-r--r--  sys/dev/ixl/ixl_pf_iov.h | 62
-rw-r--r--  sys/dev/ixl/ixl_pf_main.c | 5557
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.c | 308
-rw-r--r--  sys/dev/ixl/ixl_pf_qmgr.h | 109
-rw-r--r--  sys/dev/ixl/ixl_txrx.c | 64
-rw-r--r--  sys/dev/ixl/ixlv.h | 35
-rw-r--r--  sys/dev/ixl/ixlvc.c | 147
-rw-r--r--  sys/dev/kbd/kbd.c | 16
-rw-r--r--  sys/dev/mcd/mcd.c | 1652
-rw-r--r--  sys/dev/mcd/mcd_isa.c | 202
-rw-r--r--  sys/dev/mcd/mcdreg.h | 219
-rw-r--r--  sys/dev/mcd/mcdvar.h | 71
-rw-r--r--  sys/dev/mlx5/mlx5_en/en.h | 8
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_main.c | 31
-rw-r--r--  sys/dev/mlx5/mlx5_en/mlx5_en_rx.c | 19
-rw-r--r--  sys/dev/mlx5/mlx5_en/tcp_tlro.c | 697
-rw-r--r--  sys/dev/mlx5/mlx5_en/tcp_tlro.h | 83
-rw-r--r--  sys/dev/mpt/mpt_pci.c | 4
-rw-r--r--  sys/dev/msk/if_msk.c | 12
-rw-r--r--  sys/dev/nand/nand_generic.c | 10
-rw-r--r--  sys/dev/nand/nandsim_chip.c | 5
-rw-r--r--  sys/dev/netmap/netmap_mem2.c | 4
-rw-r--r--  sys/dev/ntb/if_ntb/if_ntb.c | 12
-rw-r--r--  sys/dev/ntb/ntb.c | 420
-rw-r--r--  sys/dev/ntb/ntb.h | 372
-rw-r--r--  sys/dev/ntb/ntb_hw/ntb_hw.c | 754
-rw-r--r--  sys/dev/ntb/ntb_if.m | 287
-rw-r--r--  sys/dev/ntb/ntb_transport.c | 330
-rw-r--r--  sys/dev/ntb/ntb_transport.h | 9
-rw-r--r--  sys/dev/nvme/nvme_sim.c | 5
-rw-r--r--  sys/dev/nvram2env/nvram2env.c | 132
-rw-r--r--  sys/dev/nvram2env/nvram2env.h | 88
-rw-r--r--  sys/dev/nvram2env/nvram2env_mips.c | 69
-rw-r--r--  sys/dev/ofw/ofw_fdt.c | 21
-rw-r--r--  sys/dev/ofw/ofwpci.c | 4
-rw-r--r--  sys/dev/ofw/openfirmio.c | 10
-rw-r--r--  sys/dev/ofw/openpromio.c | 8
-rw-r--r--  sys/dev/pci/pci_if.m | 1
-rw-r--r--  sys/dev/pci/pci_iov.c | 18
-rw-r--r--  sys/dev/pci/pci_iov.h | 5
-rw-r--r--  sys/dev/pci/pci_pci.c | 27
-rw-r--r--  sys/dev/pci/pci_private.h | 3
-rw-r--r--  sys/dev/pci/pci_user.c | 14
-rw-r--r--  sys/dev/pci/pcivar.h | 2
-rw-r--r--  sys/dev/sound/sbus/cs4231.c | 2
-rw-r--r--  sys/dev/syscons/syscons.c | 113
-rw-r--r--  sys/dev/syscons/syscons.h | 5
-rw-r--r--  sys/dev/tpm/tpm.c | 8
-rw-r--r--  sys/dev/tws/tws.c | 16
-rw-r--r--  sys/dev/uart/uart_cpu_fdt.c | 47
-rw-r--r--  sys/dev/usb/controller/ehci_ixp4xx.c | 12
-rw-r--r--  sys/dev/usb/controller/generic_ehci.c | 220
-rw-r--r--  sys/dev/usb/input/ukbd.c | 12
-rw-r--r--  sys/dev/usb/serial/u3g.c | 1
-rw-r--r--  sys/dev/usb/serial/uark.c | 1
-rw-r--r--  sys/dev/usb/serial/ubsa.c | 16
-rw-r--r--  sys/dev/usb/serial/uchcom.c | 1
-rw-r--r--  sys/dev/usb/serial/ufoma.c | 1
-rw-r--r--  sys/dev/usb/serial/umcs.c | 21
-rw-r--r--  sys/dev/usb/serial/umct.c | 25
-rw-r--r--  sys/dev/usb/serial/umodem.c | 1
-rw-r--r--  sys/dev/usb/serial/umoscom.c | 8
-rw-r--r--  sys/dev/usb/serial/uplcom.c | 1
-rw-r--r--  sys/dev/usb/serial/uslcom.c | 1
-rw-r--r--  sys/dev/usb/template/usb_template_mtp.c | 2
-rw-r--r--  sys/dev/usb/usb_device.c | 4
-rw-r--r--  sys/dev/usb/usbdevs | 1
-rw-r--r--  sys/dev/virtio/network/if_vtnet.c | 61
-rw-r--r--  sys/dev/virtio/network/if_vtnetvar.h | 1
-rw-r--r--  sys/dev/vt/vt_core.c | 2
-rw-r--r--  sys/dev/xen/netfront/netfront.c | 60
191 files changed, 20524 insertions, 14284 deletions
diff --git a/sys/dev/aic7xxx/aic7xxx_osm.c b/sys/dev/aic7xxx/aic7xxx_osm.c
index a51177c97899..141cac1c8fb6 100644
--- a/sys/dev/aic7xxx/aic7xxx_osm.c
+++ b/sys/dev/aic7xxx/aic7xxx_osm.c
@@ -130,7 +130,7 @@ aic7770_map_registers(struct ahc_softc *ahc, u_int unused_ioport_arg)
return ENOMEM;
}
ahc->platform_data->regs_res_type = SYS_RES_IOPORT;
- ahc->platform_data->regs_res_id = rid,
+ ahc->platform_data->regs_res_id = rid;
ahc->platform_data->regs = regs;
ahc->tag = rman_get_bustag(regs);
ahc->bsh = rman_get_bushandle(regs);
diff --git a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
index c7c77a9e2855..83b49139d65a 100644
--- a/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
+++ b/sys/dev/ath/ath_hal/ar5212/ar5212_attach.c
@@ -270,8 +270,8 @@ ar5212InitState(struct ath_hal_5212 *ahp, uint16_t devid, HAL_SOFTC sc,
ahp->ah_acktimeout = (u_int) -1;
ahp->ah_ctstimeout = (u_int) -1;
ahp->ah_sifstime = (u_int) -1;
- ahp->ah_txTrigLev = INIT_TX_FIFO_THRESHOLD,
- ahp->ah_maxTxTrigLev = MAX_TX_FIFO_THRESHOLD,
+ ahp->ah_txTrigLev = INIT_TX_FIFO_THRESHOLD;
+ ahp->ah_maxTxTrigLev = MAX_TX_FIFO_THRESHOLD;
OS_MEMCPY(&ahp->ah_bssidmask, defbssidmask, IEEE80211_ADDR_LEN);
#undef N
diff --git a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
index 98482fde42aa..5b10c7c80fd9 100644
--- a/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
+++ b/sys/dev/ath/ath_hal/ar5416/ar5416_attach.c
@@ -103,8 +103,8 @@ ar5416InitState(struct ath_hal_5416 *ahp5416, uint16_t devid, HAL_SOFTC sc,
ah->ah_configPCIE = ar5416ConfigPCIE;
ah->ah_disablePCIE = ar5416DisablePCIE;
ah->ah_perCalibration = ar5416PerCalibration;
- ah->ah_perCalibrationN = ar5416PerCalibrationN,
- ah->ah_resetCalValid = ar5416ResetCalValid,
+ ah->ah_perCalibrationN = ar5416PerCalibrationN;
+ ah->ah_resetCalValid = ar5416ResetCalValid;
ah->ah_setTxPowerLimit = ar5416SetTxPowerLimit;
ah->ah_setTxPower = ar5416SetTransmitPower;
ah->ah_setBoardValues = ar5416SetBoardValues;
diff --git a/sys/dev/ath/if_athioctl.h b/sys/dev/ath/if_athioctl.h
index 7b69ab5fe6c2..73905d3fa0a4 100644
--- a/sys/dev/ath/if_athioctl.h
+++ b/sys/dev/ath/if_athioctl.h
@@ -303,8 +303,8 @@ struct ath_radiotap_vendor_hdr { /* 30 bytes */
/* At this point it should be 4 byte aligned */
uint32_t evm[ATH_RADIOTAP_MAX_EVM]; /* 5 * 4 = 20 */
- uint8_t rssi_ctl[ATH_RADIOTAP_MAX_CHAINS]; /* 4 */
- uint8_t rssi_ext[ATH_RADIOTAP_MAX_CHAINS]; /* 4 */
+ uint8_t rssi_ctl[ATH_RADIOTAP_MAX_CHAINS]; /* 4 * 4 = 16 */
+ uint8_t rssi_ext[ATH_RADIOTAP_MAX_CHAINS]; /* 4 * 4 = 16 */
uint8_t vh_phyerr_code; /* Phy error code, or 0xff */
uint8_t vh_rs_status; /* RX status */
diff --git a/sys/dev/auxio/auxio.c b/sys/dev/auxio/auxio.c
index b104d5af5ee9..f559c173b867 100644
--- a/sys/dev/auxio/auxio.c
+++ b/sys/dev/auxio/auxio.c
@@ -98,7 +98,7 @@ __FBSDID("$FreeBSD$");
#define AUXIO_PCIO_NREG 5
struct auxio_softc {
- struct device *sc_dev;
+ device_t sc_dev;
int sc_nauxio;
struct resource *sc_res[AUXIO_PCIO_NREG];
diff --git a/sys/dev/bktr/bktr_os.c b/sys/dev/bktr/bktr_os.c
index aa41454f0cab..42069b39db3b 100644
--- a/sys/dev/bktr/bktr_os.c
+++ b/sys/dev/bktr/bktr_os.c
@@ -889,10 +889,11 @@ vm_offset_t vm_page_alloc_contig(vm_offset_t, vm_offset_t,
#if defined(__OpenBSD__)
static int bktr_probe(struct device *, void *, void *);
+static void bktr_attach(struct device *, struct device *, void *);
#else
-static int bktr_probe(struct device *, struct cfdata *, void *);
+static int bktr_probe(device_t, struct cfdata *, void *);
+static void bktr_attach(device_t, device_t, void *);
#endif
-static void bktr_attach(struct device *, struct device *, void *);
struct cfattach bktr_ca = {
sizeof(struct bktr_softc), bktr_probe, bktr_attach
@@ -908,10 +909,11 @@ struct cfdriver bktr_cd = {
int
bktr_probe(parent, match, aux)
- struct device *parent;
#if defined(__OpenBSD__)
+ struct device *parent;
void *match;
#else
+ device_t parent;
struct cfdata *match;
#endif
void *aux;
@@ -933,7 +935,15 @@ bktr_probe(parent, match, aux)
* the attach routine.
*/
static void
-bktr_attach(struct device *parent, struct device *self, void *aux)
+bktr_attach(parent, self, aux)
+#if defined(__OpenBSD__)
+ struct device *parent;
+ struct device *self;
+#else
+ device_t parent;
+ device_t self;
+#endif
+ void *aux;
{
bktr_ptr_t bktr;
u_long latency;
diff --git a/sys/dev/bktr/bktr_reg.h b/sys/dev/bktr/bktr_reg.h
index 4a3af1a490b0..65a6dbc359a0 100644
--- a/sys/dev/bktr/bktr_reg.h
+++ b/sys/dev/bktr/bktr_reg.h
@@ -35,7 +35,7 @@
*/
#ifdef __NetBSD__
-#include <machine/bus.h> /* struct device */
+#include <machine/bus.h> /* device_t */
#include <sys/device.h>
#include <sys/select.h> /* struct selinfo */
# ifdef DEBUG
diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c
index e832389ba610..a5c13ea600f3 100644
--- a/sys/dev/bxe/bxe.c
+++ b/sys/dev/bxe/bxe.c
@@ -5624,7 +5624,8 @@ bxe_tx_mq_start_locked(struct bxe_softc *sc,
if (!sc->link_vars.link_up ||
(if_getdrvflags(ifp) &
(IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) {
- rc = drbr_enqueue(ifp, tx_br, m);
+ if (m != NULL)
+ rc = drbr_enqueue(ifp, tx_br, m);
goto bxe_tx_mq_start_locked_exit;
}
@@ -13331,7 +13332,7 @@ bxe_get_shmem_info(struct bxe_softc *sc)
/* get the port feature config */
sc->port.config =
- SHMEM_RD(sc, dev_info.port_feature_config[port].config),
+ SHMEM_RD(sc, dev_info.port_feature_config[port].config);
/* get the link params */
sc->link_params.speed_cap_mask[0] =
diff --git a/sys/dev/cfe/cfe_api.c b/sys/dev/cfe/cfe_api.c
index bf3fa66e895e..1b3610af875c 100644
--- a/sys/dev/cfe/cfe_api.c
+++ b/sys/dev/cfe/cfe_api.c
@@ -1,5 +1,4 @@
-/* $NetBSD: cfe_api.c,v 1.5 2005/12/11 12:18:07 christos Exp $ */
-/* from: SiByte Id: cfe_api.c,v 1.16 2002/07/09 23:29:11 cgd Exp $ */
+/* from: Broadcom Id: cfe_api.c,v 1.18 2006/08/24 02:13:56 binh Exp $ */
/*-
* Copyright 2000, 2001, 2002
@@ -177,6 +176,27 @@ cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen)
}
#endif /* CFE_API_enumenv || CFE_API_ALL */
+#if defined(CFE_API_enumdev) || defined(CFE_API_ALL)
+int
+cfe_enumdev(int idx, char *name, int namelen)
+{
+ cfe_xiocb_t xiocb;
+
+ xiocb.xiocb_fcode = CFE_CMD_DEV_ENUM;
+ xiocb.xiocb_status = 0;
+ xiocb.xiocb_handle = 0;
+ xiocb.xiocb_flags = 0;
+ xiocb.xiocb_psize = sizeof(xiocb_envbuf_t);
+ xiocb.plist.xiocb_envbuf.enum_idx = idx;
+ xiocb.plist.xiocb_envbuf.name_ptr = XPTR_FROM_NATIVE(name);
+ xiocb.plist.xiocb_envbuf.name_length = namelen;
+
+ cfe_iocb_dispatch(&xiocb);
+
+ return xiocb.xiocb_status;
+}
+#endif /* CFE_API_enumdev || CFE_API_ALL */
+
#if defined(CFE_API_enummem) || defined(CFE_API_ALL)
int
cfe_enummem(int idx, int flags, cfe_xuint_t *start, cfe_xuint_t *length,
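
The new cfe_enumdev() follows the same xiocb-dispatch pattern as the surrounding
cfe_enumenv()/cfe_enummem() functions. As a minimal usage sketch (not part of the
commit, and assuming the usual CFE convention that a negative xiocb_status ends
the enumeration), a caller might walk the firmware's device list like this:

	/* Hypothetical caller of the cfe_enumdev() added above. */
	static void
	list_cfe_devices(void)
	{
		char name[64];
		int idx;

		for (idx = 0; cfe_enumdev(idx, name, sizeof(name)) == 0; idx++)
			printf("CFE device %d: %s\n", idx, name);
	}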
diff --git a/sys/dev/cfe/cfe_api.h b/sys/dev/cfe/cfe_api.h
index 943459dd7164..ade7a81c1842 100644
--- a/sys/dev/cfe/cfe_api.h
+++ b/sys/dev/cfe/cfe_api.h
@@ -1,5 +1,4 @@
-/* $NetBSD: cfe_api.h,v 1.3 2003/02/07 17:38:48 cgd Exp $ */
-/* from: SiByte Id: cfe_api.h,v 1.29 2002/07/09 23:29:11 cgd Exp $ */
+/* from: Broadcom Id: cfe_api.h,v 1.31 2006/08/24 02:13:56 binh Exp $ */
/*-
* Copyright 2000, 2001, 2002
@@ -154,6 +153,7 @@ int64_t cfe_getticks(void);
#define cfe_cpu_start(a,b,c,d,e) __cfe_cpu_start(a,b,c,d,e)
#define cfe_cpu_stop(a) __cfe_cpu_stop(a)
#define cfe_enumenv(a,b,d,e,f) __cfe_enumenv(a,b,d,e,f)
+#define cfe_enumdev(a,b,c) __cfe_enumdev(a,b,c)
#define cfe_enummem(a,b,c,d,e) __cfe_enummem(a,b,c,d,e)
#define cfe_exit(a,b) __cfe_exit(a,b)
#define cfe_flushcache(a) __cfe_cacheflush(a)
@@ -176,6 +176,7 @@ int cfe_close(int handle);
int cfe_cpu_start(int cpu, void (*fn)(void), long sp, long gp, long a1);
int cfe_cpu_stop(int cpu);
int cfe_enumenv(int idx, char *name, int namelen, char *val, int vallen);
+int cfe_enumdev(int idx, char *name, int namelen);
int cfe_enummem(int idx, int flags, uint64_t *start, uint64_t *length,
uint64_t *type);
int cfe_exit(int warm,int status);
diff --git a/sys/dev/cfe/cfe_api_int.h b/sys/dev/cfe/cfe_api_int.h
index dfdde478f692..b15a17697087 100644
--- a/sys/dev/cfe/cfe_api_int.h
+++ b/sys/dev/cfe/cfe_api_int.h
@@ -1,5 +1,4 @@
-/* $NetBSD: cfe_api_int.h,v 1.2 2003/02/07 17:38:48 cgd Exp $ */
-/* from: SiByte Id: cfe_api_int.h,v 1.21 2002/07/09 23:29:11 cgd Exp $ */
+/* from: Broadcom Id: cfe_api_int.h,v 1.22 2003/02/07 17:27:56 cgd Exp $ */
/*-
* Copyright 2000, 2001, 2002
diff --git a/sys/dev/cfe/cfe_error.h b/sys/dev/cfe/cfe_error.h
index 6b57d14c6423..d32f1d9065dd 100644
--- a/sys/dev/cfe/cfe_error.h
+++ b/sys/dev/cfe/cfe_error.h
@@ -1,5 +1,4 @@
-/* $NetBSD: cfe_error.h,v 1.2 2003/02/07 17:38:48 cgd Exp $ */
-/* from: SiByte Id: cfe_error.h,v 1.2 2002/07/09 19:37:52 cgd Exp $ */
+/* from: Broadcom Id: cfe_error.h,v 1.3 2003/02/07 17:27:56 cgd Exp $ */
/*-
* Copyright 2000, 2001, 2002
diff --git a/sys/dev/cfe/cfe_ioctl.h b/sys/dev/cfe/cfe_ioctl.h
index a45ba1d004b8..c02e79d1f1e8 100644
--- a/sys/dev/cfe/cfe_ioctl.h
+++ b/sys/dev/cfe/cfe_ioctl.h
@@ -1,14 +1,13 @@
-/* $NetBSD: cfe_ioctl.h,v 1.2 2003/02/07 17:52:08 cgd Exp $ */
-
/*-
- * Copyright 2000, 2001
+ * Copyright 2000, 2001, 2002, 2003
* Broadcom Corporation. All rights reserved.
*
- * This software is furnished under license and may be used and copied only
- * in accordance with the following terms and conditions. Subject to these
- * conditions, you may download, copy, install, use, modify and distribute
- * modified or unmodified copies of this software in source and/or binary
- * form. No title or ownership is transferred hereby.
+ * This software is furnished under license and may be used and
+ * copied only in accordance with the following terms and
+ * conditions. Subject to these conditions, you may download,
+ * copy, install, use, modify and distribute modified or unmodified
+ * copies of this software in source and/or binary form. No title
+ * or ownership is transferred hereby.
*
* 1) Any source code used, modified or distributed must reproduce and
* retain this copyright notice and list of conditions as they appear in
@@ -41,7 +40,7 @@
*
* IOCTL function numbers and I/O data structures.
*
- * Author: Mitch Lichtenberg (mpl@broadcom.com)
+ * Author: Mitch Lichtenberg
*
********************************************************************* */
@@ -53,23 +52,115 @@
#define IOCTL_NVRAM_GETINFO 1 /* return nvram_info_t */
#define IOCTL_NVRAM_ERASE 2 /* erase sector containing nvram_info_t area */
#define IOCTL_FLASH_ERASE_SECTOR 3 /* erase an arbitrary sector */
-#define IOCTL_FLASH_ERASE_ALL 4 /* Erase the entire flash */
+#define IOCTL_FLASH_ERASE_ALL 4 /* Erase the entire flash */
+#define IOCTL_FLASH_WRITE_ALL 5 /* write entire flash */
+#define IOCTL_FLASH_GETINFO 6 /* get flash device info */
+#define IOCTL_FLASH_GETSECTORS 7 /* get sector information */
+#define IOCTL_FLASH_ERASE_RANGE 8 /* erase range of bytes */
+#define IOCTL_NVRAM_UNLOCK 9 /* allow r/w beyond logical end of device */
+#define IOCTL_FLASH_PROTECT_RANGE 10 /* Protect a group of sectors */
+#define IOCTL_FLASH_UNPROTECT_RANGE 11 /* unprotect a group of sectors */
+#define IOCTL_FLASH_DATA_WIDTH_MODE 12 /* switch flash and gen bus to support 8 or 16-bit mode I/Os */
+#define IOCTL_FLASH_BURST_MODE 13 /* configure gen bus for burst mode */
+
+typedef struct flash_range_s {
+ unsigned int range_base;
+ unsigned int range_length;
+} flash_range_t;
+
+typedef struct flash_info_s {
+ unsigned long long flash_base; /* flash physical base address */
+ unsigned int flash_size; /* available device size in bytes */
+ unsigned int flash_type; /* type, from FLASH_TYPE below */
+ unsigned int flash_flags; /* Various flags (FLASH_FLAG_xxx) */
+} flash_info_t;
+
+typedef struct flash_sector_s {
+ int flash_sector_idx;
+ int flash_sector_status;
+ unsigned int flash_sector_offset;
+ unsigned int flash_sector_size;
+} flash_sector_t;
+
+#define FLASH_SECTOR_OK 0
+#define FLASH_SECTOR_INVALID -1
+
+#define FLASH_TYPE_UNKNOWN 0 /* not sure what kind of flash */
+#define FLASH_TYPE_SRAM 1 /* not flash: it's SRAM */
+#define FLASH_TYPE_ROM 2 /* not flash: it's ROM */
+#define FLASH_TYPE_FLASH 3 /* it's flash memory of some sort */
+
+#define FLASH_FLAG_NOERASE 1 /* Byte-range writes supported,
+ Erasing is not necessary */
typedef struct nvram_info_s {
- int nvram_offset; /* offset of environment area */
- int nvram_size; /* size of environment area */
- int nvram_eraseflg; /* true if we need to erase first */
+ int nvram_offset; /* offset of environment area */
+ int nvram_size; /* size of environment area */
+ int nvram_eraseflg; /* true if we need to erase first */
} nvram_info_t;
/* *********************************************************************
* Ethernet stuff
********************************************************************* */
-#define IOCTL_ETHER_GETHWADDR 1
+#define IOCTL_ETHER_GETHWADDR 1 /* Get hardware address (6bytes) */
+#define IOCTL_ETHER_SETHWADDR 2 /* Set hardware address (6bytes) */
+#define IOCTL_ETHER_GETSPEED 3 /* Get Speed and Media (int) */
+#define IOCTL_ETHER_SETSPEED 4 /* Set Speed and Media (int) */
+#define IOCTL_ETHER_GETLINK 5 /* get link status (int) */
+#define IOCTL_ETHER_GETLOOPBACK 7 /* get loopback state */
+#define IOCTL_ETHER_SETLOOPBACK 8 /* set loopback state */
+#define IOCTL_ETHER_SETPACKETFIFO 9 /* set packet fifo mode (int) */
+#define IOCTL_ETHER_SETSTROBESIG 10 /* set strobe signal (int) */
+
+#define ETHER_LOOPBACK_OFF 0 /* no loopback */
+#define ETHER_LOOPBACK_INT 1 /* Internal loopback */
+#define ETHER_LOOPBACK_EXT 2 /* External loopback (through PHY) */
+
+#define ETHER_SPEED_AUTO 0 /* Auto detect */
+#define ETHER_SPEED_UNKNOWN 0 /* Speed not known (on link status) */
+#define ETHER_SPEED_10HDX 1 /* 10MB hdx and fdx */
+#define ETHER_SPEED_10FDX 2
+#define ETHER_SPEED_100HDX 3 /* 100MB hdx and fdx */
+#define ETHER_SPEED_100FDX 4
+#define ETHER_SPEED_1000HDX 5 /* 1000MB hdx and fdx */
+#define ETHER_SPEED_1000FDX 6
+
+#define ETHER_FIFO_8 0 /* 8-bit packet fifo mode */
+#define ETHER_FIFO_16 1 /* 16-bit packet fifo mode */
+#define ETHER_ETHER 2 /* Standard ethernet mode */
+
+#define ETHER_STROBE_GMII 0 /* GMII style strobe signal */
+#define ETHER_STROBE_ENCODED 1 /* Encoded */
+#define ETHER_STROBE_SOP 2 /* SOP flagged. Only in 8-bit mode*/
+#define ETHER_STROBE_EOP 3 /* EOP flagged. Only in 8-bit mode*/
+
+/* *********************************************************************
+ * Serial Ports
+ ********************************************************************* */
+
+#define IOCTL_SERIAL_SETSPEED 1 /* get baud rate (int) */
+#define IOCTL_SERIAL_GETSPEED 2 /* set baud rate (int) */
+#define IOCTL_SERIAL_SETFLOW 3 /* Set Flow Control */
+#define IOCTL_SERIAL_GETFLOW 4 /* Get Flow Control */
+
+#define SERIAL_FLOW_NONE 0 /* no flow control */
+#define SERIAL_FLOW_SOFTWARE 1 /* software flow control (not impl) */
+#define SERIAL_FLOW_HARDWARE 2 /* hardware flow control */
/* *********************************************************************
* Block device stuff
********************************************************************* */
-#define IOCTL_BLOCK_GETBLOCKSIZE 1
-#define IOCTL_BLOCK_GETTOTALBLOCKS 2
+#define IOCTL_BLOCK_GETBLOCKSIZE 1 /* get block size (int) */
+#define IOCTL_BLOCK_GETTOTALBLOCKS 2 /* get total bocks (long long) */
+#define IOCTL_BLOCK_GETDEVTYPE 3 /* get device type (struct) */
+
+typedef struct blockdev_info_s {
+ unsigned long long blkdev_totalblocks;
+ unsigned int blkdev_blocksize;
+ unsigned int blkdev_devtype;
+} blockdev_info_t;
+
+#define BLOCK_DEVTYPE_DISK 0
+#define BLOCK_DEVTYPE_CDROM 1
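
Taken together, the new IOCTL_FLASH_* numbers and flash_info_t let a caller query
flash geometry before erasing or writing. A hedged sketch of such a query (not
from the commit; the cfe_open()/cfe_ioctl() signatures are assumed from
cfe_api.h, and the device name "flash0" is illustrative):

	/* Assumed-signature sketch: read flash geometry via IOCTL_FLASH_GETINFO. */
	flash_info_t info;
	int handle, retlen, status;

	handle = cfe_open("flash0");
	if (handle >= 0) {
		status = cfe_ioctl(handle, IOCTL_FLASH_GETINFO,
		    (unsigned char *)&info, sizeof(info), &retlen, 0);
		if (status == 0)
			printf("flash at 0x%llx, %u bytes, type %u\n",
			    info.flash_base, info.flash_size, info.flash_type);
		cfe_close(handle);
	}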
diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h
index d31124581efa..73af382dcd6a 100644
--- a/sys/dev/cxgbe/adapter.h
+++ b/sys/dev/cxgbe/adapter.h
@@ -195,6 +195,7 @@ enum {
ADAP_SYSCTL_CTX = (1 << 4),
/* TOM_INIT_DONE= (1 << 5), No longer used */
BUF_PACKING_OK = (1 << 6),
+ IS_VF = (1 << 7),
CXGBE_BUSY = (1 << 9),
@@ -429,6 +430,7 @@ enum {DOORBELL_UDB, DOORBELL_WCWR, DOORBELL_UDBWC, DOORBELL_KDB};
struct sge_eq {
unsigned int flags; /* MUST be first */
unsigned int cntxt_id; /* SGE context id for the eq */
+ unsigned int abs_id; /* absolute SGE id for the eq */
struct mtx eq_lock;
struct tx_desc *desc; /* KVA of descriptor ring */
@@ -737,8 +739,10 @@ struct sge {
struct sge_nm_txq *nm_txq; /* netmap tx queues */
struct sge_nm_rxq *nm_rxq; /* netmap rx queues */
- uint16_t iq_start;
- int eq_start;
+ uint16_t iq_start; /* first cntxt_id */
+ uint16_t iq_base; /* first abs_id */
+ int eq_start; /* first cntxt_id */
+ int eq_base; /* first abs_id */
struct sge_iq **iqmap; /* iq->cntxt_id to iq mapping */
struct sge_eq **eqmap; /* eq->cntxt_id to eq mapping */
@@ -781,6 +785,8 @@ struct adapter {
struct sge_rxq *rxq;
struct sge_nm_rxq *nm_rxq;
} __aligned(CACHE_LINE_SIZE) *irq;
+ int sge_gts_reg;
+ int sge_kdoorbell_reg;
bus_dma_tag_t dmat; /* Parent DMA tag */
diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h
index cc6edf32a3c7..1cbdf8ee059a 100644
--- a/sys/dev/cxgbe/common/common.h
+++ b/sys/dev/cxgbe/common/common.h
@@ -215,6 +215,8 @@ struct sge_params {
int pad_boundary;
int pack_boundary;
int fl_pktshift;
+ u32 sge_control;
+ u32 sge_fl_buffer_size[SGE_FLBUF_SIZES];
};
struct tp_params {
diff --git a/sys/dev/cxgbe/common/t4_hw.c b/sys/dev/cxgbe/common/t4_hw.c
index 7ecdb81a44b8..ca6d2069a5f0 100644
--- a/sys/dev/cxgbe/common/t4_hw.c
+++ b/sys/dev/cxgbe/common/t4_hw.c
@@ -289,6 +289,14 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
if ((size & 15) || size > MBOX_LEN)
return -EINVAL;
+ if (adap->flags & IS_VF) {
+ if (is_t6(adap))
+ data_reg = FW_T6VF_MBDATA_BASE_ADDR;
+ else
+ data_reg = FW_T4VF_MBDATA_BASE_ADDR;
+ ctl_reg = VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL);
+ }
+
/*
* If we have a negative timeout, that implies that we can't sleep.
*/
@@ -343,6 +351,22 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
for (i = 0; i < size; i += 8, p++)
t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));
+ if (adap->flags & IS_VF) {
+ /*
+ * For the VFs, the Mailbox Data "registers" are
+ * actually backed by T4's "MA" interface rather than
+ * PL Registers (as is the case for the PFs). Because
+ * these are in different coherency domains, the write
+ * to the VF's PL-register-backed Mailbox Control can
+ * race in front of the writes to the MA-backed VF
+ * Mailbox Data "registers". So we need to do a
+ * read-back on at least one byte of the VF Mailbox
+ * Data registers before doing the write to the VF
+ * Mailbox Control register.
+ */
+ t4_read_reg(adap, data_reg);
+ }
+
CH_DUMP_MBOX(adap, mbox, data_reg);
t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
@@ -355,10 +379,13 @@ int t4_wr_mbox_meat_timeout(struct adapter *adap, int mbox, const void *cmd,
* Loop waiting for the reply; bail out if we time out or the firmware
* reports an error.
*/
- for (i = 0;
- !((pcie_fw = t4_read_reg(adap, A_PCIE_FW)) & F_PCIE_FW_ERR) &&
- i < timeout;
- i += ms) {
+ pcie_fw = 0;
+ for (i = 0; i < timeout; i += ms) {
+ if (!(adap->flags & IS_VF)) {
+ pcie_fw = t4_read_reg(adap, A_PCIE_FW);
+ if (pcie_fw & F_PCIE_FW_ERR)
+ break;
+ }
if (sleep_ok) {
ms = delay[delay_idx]; /* last element may repeat */
if (delay_idx < ARRAY_SIZE(delay) - 1)
@@ -698,10 +725,14 @@ unsigned int t4_get_regs_len(struct adapter *adapter)
switch (chip_version) {
case CHELSIO_T4:
+ if (adapter->flags & IS_VF)
+ return FW_T4VF_REGMAP_SIZE;
return T4_REGMAP_SIZE;
case CHELSIO_T5:
case CHELSIO_T6:
+ if (adapter->flags & IS_VF)
+ return FW_T4VF_REGMAP_SIZE;
return T5_REGMAP_SIZE;
}
@@ -1180,6 +1211,18 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x27e00, 0x27e04,
};
+ static const unsigned int t4vf_reg_ranges[] = {
+ VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+ VF_MPS_REG(A_MPS_VF_CTL),
+ VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+ VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_WHOAMI),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+ FW_T4VF_MBDATA_BASE_ADDR,
+ FW_T4VF_MBDATA_BASE_ADDR +
+ ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+ };
+
static const unsigned int t5_reg_ranges[] = {
0x1008, 0x10c0,
0x10cc, 0x10f8,
@@ -1955,6 +1998,18 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x51300, 0x51308,
};
+ static const unsigned int t5vf_reg_ranges[] = {
+ VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+ VF_MPS_REG(A_MPS_VF_CTL),
+ VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+ VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+ FW_T4VF_MBDATA_BASE_ADDR,
+ FW_T4VF_MBDATA_BASE_ADDR +
+ ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+ };
+
static const unsigned int t6_reg_ranges[] = {
0x1008, 0x101c,
0x1024, 0x10a8,
@@ -2532,6 +2587,18 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
0x51300, 0x51324,
};
+ static const unsigned int t6vf_reg_ranges[] = {
+ VF_SGE_REG(A_SGE_VF_KDOORBELL), VF_SGE_REG(A_SGE_VF_GTS),
+ VF_MPS_REG(A_MPS_VF_CTL),
+ VF_MPS_REG(A_MPS_VF_STAT_RX_VF_ERR_FRAMES_H),
+ VF_PL_REG(A_PL_VF_WHOAMI), VF_PL_REG(A_PL_VF_REVISION),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_CTRL),
+ VF_CIM_REG(A_CIM_VF_EXT_MAILBOX_STATUS),
+ FW_T6VF_MBDATA_BASE_ADDR,
+ FW_T6VF_MBDATA_BASE_ADDR +
+ ((NUM_CIM_PF_MAILBOX_DATA_INSTANCES - 1) * 4),
+ };
+
u32 *buf_end = (u32 *)(buf + buf_size);
const unsigned int *reg_ranges;
int reg_ranges_size, range;
@@ -2543,18 +2610,33 @@ void t4_get_regs(struct adapter *adap, u8 *buf, size_t buf_size)
*/
switch (chip_version) {
case CHELSIO_T4:
- reg_ranges = t4_reg_ranges;
- reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+ if (adap->flags & IS_VF) {
+ reg_ranges = t4vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t4vf_reg_ranges);
+ } else {
+ reg_ranges = t4_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t4_reg_ranges);
+ }
break;
case CHELSIO_T5:
- reg_ranges = t5_reg_ranges;
- reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ if (adap->flags & IS_VF) {
+ reg_ranges = t5vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5vf_reg_ranges);
+ } else {
+ reg_ranges = t5_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t5_reg_ranges);
+ }
break;
case CHELSIO_T6:
- reg_ranges = t6_reg_ranges;
- reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ if (adap->flags & IS_VF) {
+ reg_ranges = t6vf_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6vf_reg_ranges);
+ } else {
+ reg_ranges = t6_reg_ranges;
+ reg_ranges_size = ARRAY_SIZE(t6_reg_ranges);
+ }
break;
default:
@@ -7644,6 +7726,7 @@ int t4_init_sge_params(struct adapter *adapter)
{
u32 r;
struct sge_params *sp = &adapter->params.sge;
+ unsigned i;
r = t4_read_reg(adapter, A_SGE_INGRESS_RX_THRESHOLD);
sp->counter_val[0] = G_THRESHOLD_0(r);
@@ -7686,6 +7769,7 @@ int t4_init_sge_params(struct adapter *adapter)
sp->page_shift = (r & M_HOSTPAGESIZEPF0) + 10;
r = t4_read_reg(adapter, A_SGE_CONTROL);
+ sp->sge_control = r;
sp->spg_len = r & F_EGRSTATUSPAGESIZE ? 128 : 64;
sp->fl_pktshift = G_PKTSHIFT(r);
sp->pad_boundary = 1 << (G_INGPADBOUNDARY(r) + 5);
@@ -7698,6 +7782,9 @@ int t4_init_sge_params(struct adapter *adapter)
else
sp->pack_boundary = 1 << (G_INGPACKBOUNDARY(r) + 5);
}
+ for (i = 0; i < SGE_FLBUF_SIZES; i++)
+ sp->sge_fl_buffer_size[i] = t4_read_reg(adapter,
+ A_SGE_FL_BUFFER_SIZE0 + (4 * i));
return 0;
}
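
The comment in the mailbox hunk above explains the ordering hazard this change
works around: a VF's Mailbox Data "registers" are backed by the MA interface
while the Mailbox Control register is a PL register, so the doorbell write can
race ahead of the data writes; the read-back serializes them. A condensed sketch
of the pattern (not verbatim driver code; the register helpers and constants are
the ones used throughout t4_hw.c):

	/* Write a VF mailbox command, then ring the doorbell safely. */
	static void
	vf_mbox_post(struct adapter *adap, const __be64 *p, int size,
	    u32 data_reg, u32 ctl_reg)
	{
		int i;

		for (i = 0; i < size; i += 8, p++)	/* MA-backed data regs */
			t4_write_reg64(adap, data_reg + i, be64_to_cpu(*p));

		/*
		 * Read back at least one byte of the data registers so the
		 * MA-backed writes complete before the PL-backed doorbell.
		 */
		(void)t4_read_reg(adap, data_reg);

		t4_write_reg(adap, ctl_reg, F_MBMSGVALID | V_MBOWNER(X_MBOWNER_FW));
	}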
diff --git a/sys/dev/cxgbe/iw_cxgbe/cq.c b/sys/dev/cxgbe/iw_cxgbe/cq.c
index b40ffc7fb6c1..5c040e97377d 100644
--- a/sys/dev/cxgbe/iw_cxgbe/cq.c
+++ b/sys/dev/cxgbe/iw_cxgbe/cq.c
@@ -172,7 +172,7 @@ create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
cq->gen = 1;
cq->gts = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
- MYPF_REG(SGE_PF_GTS));
+ sc->sge_gts_reg);
cq->rdev = rdev;
if (user) {
diff --git a/sys/dev/cxgbe/iw_cxgbe/qp.c b/sys/dev/cxgbe/iw_cxgbe/qp.c
index d1be854c3471..5311b2c8577a 100644
--- a/sys/dev/cxgbe/iw_cxgbe/qp.c
+++ b/sys/dev/cxgbe/iw_cxgbe/qp.c
@@ -182,9 +182,9 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
pci_unmap_addr_set(&wq->rq, mapping, wq->rq.dma_addr);
wq->db = (void *)((unsigned long)rman_get_virtual(sc->regs_res) +
- MYPF_REG(SGE_PF_KDOORBELL));
+ sc->sge_kdoorbell_reg);
wq->gts = (void *)((unsigned long)rman_get_virtual(rdev->adap->regs_res)
- + MYPF_REG(SGE_PF_GTS));
+ + sc->sge_gts_reg);
if (user) {
wq->sq.udb = (u64)((char*)rman_get_virtual(rdev->adap->udbs_res) +
(wq->sq.qid << rdev->qpshift));
diff --git a/sys/dev/cxgbe/offload.h b/sys/dev/cxgbe/offload.h
index 22612d5156a1..cb0006c00a20 100644
--- a/sys/dev/cxgbe/offload.h
+++ b/sys/dev/cxgbe/offload.h
@@ -147,6 +147,7 @@ struct tom_tunables {
int ddp;
int rx_coalesce;
int tx_align;
+ int tx_zcopy;
};
#ifdef TCP_OFFLOAD
diff --git a/sys/dev/cxgbe/t4_if.m b/sys/dev/cxgbe/t4_if.m
index 430b4801dfa7..59e86bdab075 100644
--- a/sys/dev/cxgbe/t4_if.m
+++ b/sys/dev/cxgbe/t4_if.m
@@ -55,11 +55,11 @@ METHOD int detach_child {
device_t dev;
};
-# Called by a driver to query the PF4 driver for the unit number to use
-# for a given port. If the port is not enabled on the adapter, this
-# will fail.
-METHOD int read_port_unit {
+# Called by a driver to query the PF4 driver for the child device
+# associated with a given port. If the port is not enabled on the adapter,
+# this will fail.
+METHOD int read_port_device {
device_t dev;
int port;
- int *unit;
+ device_t *child;
};
diff --git a/sys/dev/cxgbe/t4_iov.c b/sys/dev/cxgbe/t4_iov.c
index 9ac1de5c8e4a..d0c31e82b330 100644
--- a/sys/dev/cxgbe/t4_iov.c
+++ b/sys/dev/cxgbe/t4_iov.c
@@ -104,6 +104,7 @@ t4iov_probe(device_t dev)
for (i = 0; i < nitems(t4iov_pciids); i++) {
if (d == t4iov_pciids[i].device) {
device_set_desc(dev, t4iov_pciids[i].desc);
+ device_quiet(dev);
return (BUS_PROBE_DEFAULT);
}
}
@@ -120,6 +121,7 @@ t5iov_probe(device_t dev)
for (i = 0; i < nitems(t5iov_pciids); i++) {
if (d == t5iov_pciids[i].device) {
device_set_desc(dev, t5iov_pciids[i].desc);
+ device_quiet(dev);
return (BUS_PROBE_DEFAULT);
}
}
@@ -148,25 +150,27 @@ t4iov_attach_child(device_t dev)
#ifdef PCI_IOV
nvlist_t *pf_schema, *vf_schema;
#endif
- int error, unit;
+ device_t pdev;
+ int error;
sc = device_get_softc(dev);
MPASS(!sc->sc_attached);
/*
* PF0-3 are associated with a specific port on the NIC (PF0
- * with port 0, etc.). Ask the PF4 driver for the unit number
- * for this function's associated port to determine if the port
- * is present.
+ * with port 0, etc.). Ask the PF4 driver for the device for
+ * this function's associated port to determine if the port is
+ * present.
*/
- error = T4_READ_PORT_UNIT(sc->sc_main, pci_get_function(dev), &unit);
+ error = T4_READ_PORT_DEVICE(sc->sc_main, pci_get_function(dev), &pdev);
if (error)
return (0);
#ifdef PCI_IOV
pf_schema = pci_iov_schema_alloc_node();
vf_schema = pci_iov_schema_alloc_node();
- error = pci_iov_attach(dev, pf_schema, vf_schema);
+ error = pci_iov_attach_name(dev, pf_schema, vf_schema, "%s",
+ device_get_nameunit(pdev));
if (error) {
device_printf(dev, "Failed to initialize SR-IOV: %d\n", error);
return (0);
diff --git a/sys/dev/cxgbe/t4_main.c b/sys/dev/cxgbe/t4_main.c
index 773d50ac033b..6b7edaf215d2 100644
--- a/sys/dev/cxgbe/t4_main.c
+++ b/sys/dev/cxgbe/t4_main.c
@@ -83,14 +83,14 @@ static int t4_probe(device_t);
static int t4_attach(device_t);
static int t4_detach(device_t);
static int t4_ready(device_t);
-static int t4_read_port_unit(device_t, int, int *);
+static int t4_read_port_device(device_t, int, device_t *);
static device_method_t t4_methods[] = {
DEVMETHOD(device_probe, t4_probe),
DEVMETHOD(device_attach, t4_attach),
DEVMETHOD(device_detach, t4_detach),
DEVMETHOD(t4_is_main_ready, t4_ready),
- DEVMETHOD(t4_read_port_unit, t4_read_port_unit),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
DEVMETHOD_END
};
@@ -134,14 +134,9 @@ static driver_t vcxgbe_driver = {
};
static d_ioctl_t t4_ioctl;
-static d_open_t t4_open;
-static d_close_t t4_close;
static struct cdevsw t4_cdevsw = {
.d_version = D_VERSION,
- .d_flags = 0,
- .d_open = t4_open,
- .d_close = t4_close,
.d_ioctl = t4_ioctl,
.d_name = "t4nex",
};
@@ -154,7 +149,7 @@ static device_method_t t5_methods[] = {
DEVMETHOD(device_detach, t4_detach),
DEVMETHOD(t4_is_main_ready, t4_ready),
- DEVMETHOD(t4_read_port_unit, t4_read_port_unit),
+ DEVMETHOD(t4_read_port_device, t4_read_port_device),
DEVMETHOD_END
};
@@ -179,15 +174,6 @@ static driver_t vcxl_driver = {
sizeof(struct vi_info)
};
-static struct cdevsw t5_cdevsw = {
- .d_version = D_VERSION,
- .d_flags = 0,
- .d_open = t4_open,
- .d_close = t4_close,
- .d_ioctl = t4_ioctl,
- .d_name = "t5nex",
-};
-
/* ifnet + media interface */
static void cxgbe_init(void *);
static int cxgbe_ioctl(struct ifnet *, unsigned long, caddr_t);
@@ -681,6 +667,7 @@ t4_attach(device_t dev)
{
struct adapter *sc;
int rc = 0, i, j, n10g, n1g, rqidx, tqidx;
+ struct make_dev_args mda;
struct intrs_and_queues iaq;
struct sge *s;
uint8_t *buf;
@@ -710,6 +697,8 @@ t4_attach(device_t dev)
sc->params.pci.mps = 128 << ((v & PCIEM_CTL_MAX_PAYLOAD) >> 5);
}
+ sc->sge_gts_reg = MYPF_REG(A_SGE_PF_GTS);
+ sc->sge_kdoorbell_reg = MYPF_REG(A_SGE_PF_KDOORBELL);
sc->traceq = -1;
mtx_init(&sc->ifp_lock, sc->ifp_lockname, 0, MTX_DEF);
snprintf(sc->ifp_lockname, sizeof(sc->ifp_lockname), "%s tracer",
@@ -760,13 +749,16 @@ t4_attach(device_t dev)
setup_memwin(sc);
if (t4_init_devlog_params(sc, 0) == 0)
fixup_devlog_params(sc);
- sc->cdev = make_dev(is_t4(sc) ? &t4_cdevsw : &t5_cdevsw,
- device_get_unit(dev), UID_ROOT, GID_WHEEL, 0600, "%s",
- device_get_nameunit(dev));
- if (sc->cdev == NULL)
- device_printf(dev, "failed to create nexus char device.\n");
- else
- sc->cdev->si_drv1 = sc;
+ make_dev_args_init(&mda);
+ mda.mda_devsw = &t4_cdevsw;
+ mda.mda_uid = UID_ROOT;
+ mda.mda_gid = GID_WHEEL;
+ mda.mda_mode = 0600;
+ mda.mda_si_drv1 = sc;
+ rc = make_dev_s(&mda, &sc->cdev, "%s", device_get_nameunit(dev));
+ if (rc != 0)
+ device_printf(dev, "failed to create nexus char device: %d.\n",
+ rc);
/* Go no further if recovery mode has been requested. */
if (TUNABLE_INT_FETCH("hw.cxgbe.sos", &i) && i != 0) {
@@ -1102,7 +1094,7 @@ t4_ready(device_t dev)
}
static int
-t4_read_port_unit(device_t dev, int port, int *unit)
+t4_read_port_device(device_t dev, int port, device_t *child)
{
struct adapter *sc;
struct port_info *pi;
@@ -1113,7 +1105,7 @@ t4_read_port_unit(device_t dev, int port, int *unit)
pi = sc->port[port];
if (pi == NULL || pi->dev == NULL)
return (ENXIO);
- *unit = device_get_unit(pi->dev);
+ *child = pi->dev;
return (0);
}
@@ -3340,6 +3332,8 @@ get_params__post_init(struct adapter *sc)
sc->vres.iscsi.size = val[1] - val[0] + 1;
}
+ t4_init_sge_params(sc);
+
/*
* We've got the params we wanted to query via the firmware. Now grab
* some others directly from the chip.
@@ -4372,7 +4366,7 @@ t4_alloc_irq(struct adapter *sc, struct irq *irq, int rid,
"failed to setup interrupt for rid %d, name %s: %d\n",
rid, name, rc);
} else if (name)
- bus_describe_intr(sc->dev, irq->res, irq->tag, name);
+ bus_describe_intr(sc->dev, irq->res, irq->tag, "%s", name);
return (rc);
}
@@ -4864,6 +4858,11 @@ t4_sysctls(struct adapter *sc)
SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_align",
CTLFLAG_RW, &sc->tt.tx_align, 0, "chop and align payload");
+ sc->tt.tx_zcopy = 0;
+ SYSCTL_ADD_INT(ctx, children, OID_AUTO, "tx_zcopy",
+ CTLFLAG_RW, &sc->tt.tx_zcopy, 0,
+ "Enable zero-copy aio_write(2)");
+
SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "timer_tick",
CTLTYPE_STRING | CTLFLAG_RD, sc, 0, sysctl_tp_tick, "A",
"TP timer tick (us)");
@@ -7909,11 +7908,6 @@ set_filter(struct adapter *sc, struct t4_filter *t)
goto done;
}
- if (!(sc->flags & FULL_INIT_DONE)) {
- rc = EAGAIN;
- goto done;
- }
-
if (t->idx >= nfilters) {
rc = EINVAL;
goto done;
@@ -7947,6 +7941,10 @@ set_filter(struct adapter *sc, struct t4_filter *t)
goto done;
}
+ if (!(sc->flags & FULL_INIT_DONE) &&
+ ((rc = adapter_full_init(sc)) != 0))
+ goto done;
+
if (sc->tids.ftid_tab == NULL) {
KASSERT(sc->tids.ftids_in_use == 0,
("%s: no memory allocated but filters_in_use > 0",
@@ -8723,18 +8721,6 @@ t4_iterate(void (*func)(struct adapter *, void *), void *arg)
}
static int
-t4_open(struct cdev *dev, int flags, int type, struct thread *td)
-{
- return (0);
-}
-
-static int
-t4_close(struct cdev *dev, int flags, int type, struct thread *td)
-{
- return (0);
-}
-
-static int
t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
struct thread *td)
{
@@ -8779,7 +8765,7 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
}
case CHELSIO_T4_REGDUMP: {
struct t4_regdump *regs = (struct t4_regdump *)data;
- int reglen = is_t4(sc) ? T4_REGDUMP_SIZE : T5_REGDUMP_SIZE;
+ int reglen = t4_get_regs_len(sc);
uint8_t *buf;
if (regs->len < reglen) {
@@ -8905,7 +8891,7 @@ t4_ioctl(struct cdev *dev, unsigned long cmd, caddr_t data, int fflag,
rc = t4_set_tracer(sc, (struct t4_tracer *)data);
break;
default:
- rc = EINVAL;
+ rc = ENOTTY;
}
return (rc);
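
The switch from make_dev() to make_dev_s(9) above is not just cosmetic:
mda_si_drv1 is installed before the device node becomes visible, so an ioctl can
never race in and observe a NULL si_drv1, which is why the empty t4_open()/
t4_close() stubs and the separate t5_cdevsw could be dropped. A minimal sketch of
the idiom ('mydevsw', 'softc' and 'unit' are placeholders, not identifiers from
the commit; error handling abbreviated):

	struct make_dev_args mda;
	struct cdev *cdev;
	int rc;

	make_dev_args_init(&mda);
	mda.mda_devsw = &mydevsw;
	mda.mda_uid = UID_ROOT;
	mda.mda_gid = GID_WHEEL;
	mda.mda_mode = 0600;
	mda.mda_si_drv1 = softc;	/* set atomically with node creation */
	rc = make_dev_s(&mda, &cdev, "mydev%d", unit);
	if (rc != 0)
		printf("make_dev_s failed: %d\n", rc);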
diff --git a/sys/dev/cxgbe/t4_netmap.c b/sys/dev/cxgbe/t4_netmap.c
index f6c96e13fb78..d7356e0bdb43 100644
--- a/sys/dev/cxgbe/t4_netmap.c
+++ b/sys/dev/cxgbe/t4_netmap.c
@@ -204,7 +204,7 @@ alloc_nm_rxq_hwq(struct vi_info *vi, struct sge_nm_rxq *nm_rxq, int cong)
}
}
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(sc, sc->sge_gts_reg,
V_INGRESSQID(nm_rxq->iq_cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
@@ -364,7 +364,7 @@ cxgbe_netmap_on(struct adapter *sc, struct vi_info *vi, struct ifnet *ifp,
MPASS((j & 7) == 0);
j /= 8; /* driver pidx to hardware pidx */
wmb();
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
nm_rxq->fl_db_val | V_PIDX(j));
atomic_cmpset_int(&irq->nm_state, NM_OFF, NM_ON);
@@ -537,7 +537,7 @@ ring_nm_txq_db(struct adapter *sc, struct sge_nm_txq *nm_txq)
break;
case DOORBELL_KDB:
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
V_QID(nm_txq->cntxt_id) | V_PIDX(n));
break;
}
@@ -818,7 +818,7 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
}
if (++dbinc == 8 && n >= 32) {
wmb();
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
nm_rxq->fl_db_val | V_PIDX(dbinc));
dbinc = 0;
}
@@ -827,7 +827,7 @@ cxgbe_netmap_rxsync(struct netmap_kring *kring, int flags)
if (dbinc > 0) {
wmb();
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
nm_rxq->fl_db_val | V_PIDX(dbinc));
}
}
@@ -981,14 +981,14 @@ t4_nm_intr(void *arg)
fl_credits /= 8;
IDXINCR(nm_rxq->fl_pidx, fl_credits * 8,
nm_rxq->fl_sidx);
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
nm_rxq->fl_db_val | V_PIDX(fl_credits));
fl_credits = fl_cidx & 7;
} else if (!black_hole) {
netmap_rx_irq(ifp, nm_rxq->nid, &work);
MPASS(work != 0);
}
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(sc, sc->sge_gts_reg,
V_CIDXINC(n) | V_INGRESSQID(nm_rxq->iq_cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
n = 0;
@@ -999,12 +999,12 @@ t4_nm_intr(void *arg)
if (black_hole) {
fl_credits /= 8;
IDXINCR(nm_rxq->fl_pidx, fl_credits * 8, nm_rxq->fl_sidx);
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
nm_rxq->fl_db_val | V_PIDX(fl_credits));
} else
netmap_rx_irq(ifp, nm_rxq->nid, &work);
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(n) |
+ t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(n) |
V_INGRESSQID((u32)nm_rxq->iq_cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(holdoff_tmr_idx)));
}
diff --git a/sys/dev/cxgbe/t4_sge.c b/sys/dev/cxgbe/t4_sge.c
index cfccdbc42d53..bd949b097fa0 100644
--- a/sys/dev/cxgbe/t4_sge.c
+++ b/sys/dev/cxgbe/t4_sge.c
@@ -81,7 +81,7 @@ __FBSDID("$FreeBSD$");
* Ethernet frames are DMA'd at this byte offset into the freelist buffer.
* 0-7 are valid values.
*/
-int fl_pktshift = 2;
+static int fl_pktshift = 2;
TUNABLE_INT("hw.cxgbe.fl_pktshift", &fl_pktshift);
/*
@@ -98,7 +98,7 @@ TUNABLE_INT("hw.cxgbe.fl_pad", &fl_pad);
* -1: driver should figure out a good value.
* 64 or 128 are the only other valid values.
*/
-int spg_len = -1;
+static int spg_len = -1;
TUNABLE_INT("hw.cxgbe.spg_len", &spg_len);
/*
@@ -590,7 +590,7 @@ t4_tweak_chip_settings(struct adapter *sc)
/*
* SGE wants the buffer to be at least 64B and then a multiple of 16. If
- * padding is is use the buffer's start and end need to be aligned to the pad
+ * padding is in use, the buffer's start and end need to be aligned to the pad
* boundary as well. We'll just make sure that the size is a multiple of the
* boundary here, it is up to the buffer allocation code to make sure the start
* of the buffer is aligned as well.
@@ -625,11 +625,9 @@ t4_read_chip_settings(struct adapter *sc)
struct sw_zone_info *swz, *safe_swz;
struct hw_buf_info *hwb;
- t4_init_sge_params(sc);
-
m = F_RXPKTCPLMODE;
v = F_RXPKTCPLMODE;
- r = t4_read_reg(sc, A_SGE_CONTROL);
+ r = sc->params.sge.sge_control;
if ((r & m) != v) {
device_printf(sc->dev, "invalid SGE_CONTROL(0x%x)\n", r);
rc = EINVAL;
@@ -647,7 +645,7 @@ t4_read_chip_settings(struct adapter *sc)
/* Filter out unusable hw buffer sizes entirely (mark with -2). */
hwb = &s->hw_buf_info[0];
for (i = 0; i < nitems(s->hw_buf_info); i++, hwb++) {
- r = t4_read_reg(sc, A_SGE_FL_BUFFER_SIZE0 + (4 * i));
+ r = sc->params.sge.sge_fl_buffer_size[i];
hwb->size = r;
hwb->zidx = hwsz_ok(sc, r) ? -1 : -2;
hwb->next = -1;
@@ -1444,7 +1442,8 @@ service_iq(struct sge_iq *iq, int budget)
break;
}
- q = sc->sge.iqmap[lq - sc->sge.iq_start];
+ q = sc->sge.iqmap[lq - sc->sge.iq_start -
+ sc->sge.iq_base];
if (atomic_cmpset_int(&q->state, IQS_IDLE,
IQS_BUSY)) {
if (service_iq(q, q->qsize / 16) == 0) {
@@ -1474,7 +1473,7 @@ service_iq(struct sge_iq *iq, int budget)
d = &iq->desc[0];
}
if (__predict_false(++ndescs == limit)) {
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
+ t4_write_reg(sc, sc->sge_gts_reg,
V_CIDXINC(ndescs) |
V_INGRESSQID(iq->cntxt_id) |
V_SEINTARM(V_QINTR_TIMER_IDX(X_TIMERREG_UPDATE_CIDX)));
@@ -1529,7 +1528,7 @@ process_iql:
}
#endif
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
+ t4_write_reg(sc, sc->sge_gts_reg, V_CIDXINC(ndescs) |
V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_params));
if (iq->flags & IQ_HAS_FL) {
@@ -2793,7 +2792,7 @@ alloc_iq_fl(struct vi_info *vi, struct sge_iq *iq, struct sge_fl *fl,
/* Enable IQ interrupts */
atomic_store_rel_int(&iq->state, IQS_IDLE);
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_SEINTARM(iq->intr_params) |
+ t4_write_reg(sc, sc->sge_gts_reg, V_SEINTARM(iq->intr_params) |
V_INGRESSQID(iq->cntxt_id));
return (0);
@@ -2972,6 +2971,7 @@ alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
struct sysctl_oid *oid)
{
int rc;
+ struct adapter *sc = vi->pi->adapter;
struct sysctl_oid_list *children;
char name[16];
@@ -2980,12 +2980,20 @@ alloc_rxq(struct vi_info *vi, struct sge_rxq *rxq, int intr_idx, int idx,
if (rc != 0)
return (rc);
+ if (idx == 0)
+ sc->sge.iq_base = rxq->iq.abs_id - rxq->iq.cntxt_id;
+ else
+ KASSERT(rxq->iq.cntxt_id + sc->sge.iq_base == rxq->iq.abs_id,
+ ("iq_base mismatch"));
+ KASSERT(sc->sge.iq_base == 0 || sc->flags & IS_VF,
+ ("PF with non-zero iq_base"));
+
/*
* The freelist is just barely above the starvation threshold right now,
* fill it up a bit more.
*/
FL_LOCK(&rxq->fl);
- refill_fl(vi->pi->adapter, &rxq->fl, 128);
+ refill_fl(sc, &rxq->fl, 128);
FL_UNLOCK(&rxq->fl);
#if defined(INET) || defined(INET6)
@@ -3317,6 +3325,7 @@ eth_eq_alloc(struct adapter *sc, struct vi_info *vi, struct sge_eq *eq)
eq->flags |= EQ_ALLOCATED;
eq->cntxt_id = G_FW_EQ_ETH_CMD_EQID(be32toh(c.eqid_pkd));
+ eq->abs_id = G_FW_EQ_ETH_CMD_PHYSEQID(be32toh(c.physeqid_pkd));
cntxt_id = eq->cntxt_id - sc->sge.eq_start;
if (cntxt_id >= sc->sge.neq)
panic("%s: eq->cntxt_id (%d) more than the max (%d)", __func__,
@@ -3557,6 +3566,14 @@ alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
/* Can't fail after this point. */
+ if (idx == 0)
+ sc->sge.eq_base = eq->abs_id - eq->cntxt_id;
+ else
+ KASSERT(eq->cntxt_id + sc->sge.eq_base == eq->abs_id,
+ ("eq_base mismatch"));
+ KASSERT(sc->sge.eq_base == 0 || sc->flags & IS_VF,
+ ("PF with non-zero eq_base"));
+
TASK_INIT(&txq->tx_reclaim_task, 0, tx_reclaim, eq);
txq->ifp = vi->ifp;
txq->gl = sglist_alloc(TX_SGL_SEGS, M_WAITOK);
@@ -3572,6 +3589,8 @@ alloc_txq(struct vi_info *vi, struct sge_txq *txq, int idx,
NULL, "tx queue");
children = SYSCTL_CHILDREN(oid);
+ SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "abs_id", CTLFLAG_RD,
+ &eq->abs_id, 0, "absolute id of the queue");
SYSCTL_ADD_UINT(&vi->ctx, children, OID_AUTO, "cntxt_id", CTLFLAG_RD,
&eq->cntxt_id, 0, "SGE context id of the queue");
SYSCTL_ADD_PROC(&vi->ctx, children, OID_AUTO, "cidx",
@@ -3676,7 +3695,7 @@ ring_fl_db(struct adapter *sc, struct sge_fl *fl)
if (fl->udb)
*fl->udb = htole32(v);
else
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL), v);
+ t4_write_reg(sc, sc->sge_kdoorbell_reg, v);
IDXINCR(fl->dbidx, n, fl->sidx);
}
@@ -4409,7 +4428,7 @@ ring_eq_db(struct adapter *sc, struct sge_eq *eq, u_int n)
break;
case DOORBELL_KDB:
- t4_write_reg(sc, MYPF_REG(A_SGE_PF_KDOORBELL),
+ t4_write_reg(sc, sc->sge_kdoorbell_reg,
V_QID(eq->cntxt_id) | V_PIDX(n));
break;
}
@@ -4755,7 +4774,7 @@ handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
KASSERT(m == NULL, ("%s: payload with opcode %02x", __func__,
rss->opcode));
- eq = s->eqmap[qid - s->eq_start];
+ eq = s->eqmap[qid - s->eq_start - s->eq_base];
(*h[eq->flags & EQ_TYPEMASK])(sc, eq);
return (0);
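
The iq_base/eq_base fields added above record, once per adapter, the constant
offset between a queue's SGE context id and the absolute id that hardware events
carry; on a PF the offset is zero (the KASSERTs enforce this), so only VFs pay
the extra subtraction when indexing iqmap[]/eqmap[]. Worked arithmetic with
made-up numbers (illustrative only, not values from the commit):

	unsigned int abs_id = 72;	/* absolute id of the first ingress queue */
	unsigned int cntxt_id = 8;	/* its SGE context id */
	unsigned int iq_start = 8;	/* first context id owned by this function */
	int iq_base = abs_id - cntxt_id;	/* 64, recorded when idx == 0 */

	/* An event names the queue by absolute id ('lq' in service_iq()): */
	unsigned int lq = 75;
	int slot = lq - iq_start - iq_base;	/* 75 - 8 - 64 = 3 -> iqmap[3] */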
diff --git a/sys/dev/cxgbe/tom/t4_cpl_io.c b/sys/dev/cxgbe/tom/t4_cpl_io.c
index f7ef499f4488..c7e0661862e0 100644
--- a/sys/dev/cxgbe/tom/t4_cpl_io.c
+++ b/sys/dev/cxgbe/tom/t4_cpl_io.c
@@ -32,15 +32,18 @@ __FBSDID("$FreeBSD$");
#ifdef TCP_OFFLOAD
#include <sys/param.h>
-#include <sys/types.h>
+#include <sys/aio.h>
+#include <sys/file.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/module.h>
+#include <sys/proc.h>
#include <sys/protosw.h>
#include <sys/domain.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sglist.h>
+#include <sys/taskqueue.h>
#include <netinet/in.h>
#include <netinet/in_pcb.h>
#include <netinet/ip.h>
@@ -51,6 +54,14 @@ __FBSDID("$FreeBSD$");
#include <netinet/tcp_var.h>
#include <netinet/toecore.h>
+#include <security/mac/mac_framework.h>
+
+#include <vm/vm.h>
+#include <vm/vm_extern.h>
+#include <vm/pmap.h>
+#include <vm/vm_map.h>
+#include <vm/vm_page.h>
+
#include "common/common.h"
#include "common/t4_msg.h"
#include "common/t4_regs.h"
@@ -71,6 +82,34 @@ VNET_DECLARE(int, tcp_autorcvbuf_inc);
VNET_DECLARE(int, tcp_autorcvbuf_max);
#define V_tcp_autorcvbuf_max VNET(tcp_autorcvbuf_max)
+#define IS_AIOTX_MBUF(m) \
+ ((m)->m_flags & M_EXT && (m)->m_ext.ext_flags & EXT_FLAG_AIOTX)
+
+static void t4_aiotx_cancel(struct kaiocb *job);
+static void t4_aiotx_queue_toep(struct toepcb *toep);
+
+static size_t
+aiotx_mbuf_pgoff(struct mbuf *m)
+{
+ struct aiotx_buffer *ab;
+
+ MPASS(IS_AIOTX_MBUF(m));
+ ab = m->m_ext.ext_arg1;
+ return ((ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) % PAGE_SIZE);
+}
+
+static vm_page_t *
+aiotx_mbuf_pages(struct mbuf *m)
+{
+ struct aiotx_buffer *ab;
+ int npages;
+
+ MPASS(IS_AIOTX_MBUF(m));
+ ab = m->m_ext.ext_arg1;
+ npages = (ab->ps.offset + (uintptr_t)m->m_ext.ext_arg2) / PAGE_SIZE;
+ return (ab->ps.pages + npages);
+}
+
void
send_flowc_wr(struct toepcb *toep, struct flowc_tx_params *ftxp)
{
@@ -519,7 +558,11 @@ write_tx_sgl(void *dst, struct mbuf *start, struct mbuf *stop, int nsegs, int n)
i = -1;
for (m = start; m != stop; m = m->m_next) {
- rc = sglist_append(&sg, mtod(m, void *), m->m_len);
+ if (IS_AIOTX_MBUF(m))
+ rc = sglist_append_vmpages(&sg, aiotx_mbuf_pages(m),
+ aiotx_mbuf_pgoff(m), m->m_len);
+ else
+ rc = sglist_append(&sg, mtod(m, void *), m->m_len);
if (__predict_false(rc != 0))
panic("%s: sglist_append %d", __func__, rc);
@@ -579,6 +622,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
struct sockbuf *sb = &so->so_snd;
int tx_credits, shove, compl, sowwakeup;
struct ofld_tx_sdesc *txsd = &toep->txsd[toep->txsd_pidx];
+ bool aiotx_mbuf_seen;
INP_WLOCK_ASSERT(inp);
KASSERT(toep->flags & TPF_FLOWC_WR_SENT,
@@ -589,6 +633,10 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
toep->ulp_mode == ULP_MODE_RDMA,
("%s: ulp_mode %u for toep %p", __func__, toep->ulp_mode, toep));
+#ifdef VERBOSE_TRACES
+ CTR4(KTR_CXGBE, "%s: tid %d toep flags %#x tp flags %#x drop %d",
+ __func__, toep->tid, toep->flags, tp->t_flags);
+#endif
if (__predict_false(toep->flags & TPF_ABORT_SHUTDOWN))
return;
@@ -618,8 +666,15 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
plen = 0;
nsegs = 0;
max_nsegs_1mbuf = 0; /* max # of SGL segments in any one mbuf */
+ aiotx_mbuf_seen = false;
for (m = sndptr; m != NULL; m = m->m_next) {
- int n = sglist_count(mtod(m, void *), m->m_len);
+ int n;
+
+ if (IS_AIOTX_MBUF(m))
+ n = sglist_count_vmpages(aiotx_mbuf_pages(m),
+ aiotx_mbuf_pgoff(m), m->m_len);
+ else
+ n = sglist_count(mtod(m, void *), m->m_len);
nsegs += n;
plen += m->m_len;
@@ -631,9 +686,13 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
if (plen == 0) {
/* Too few credits */
toep->flags |= TPF_TX_SUSPENDED;
- if (sowwakeup)
+ if (sowwakeup) {
+ if (!TAILQ_EMPTY(
+ &toep->aiotx_jobq))
+ t4_aiotx_queue_toep(
+ toep);
sowwakeup_locked(so);
- else
+ } else
SOCKBUF_UNLOCK(sb);
SOCKBUF_UNLOCK_ASSERT(sb);
return;
@@ -641,6 +700,8 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
break;
}
+ if (IS_AIOTX_MBUF(m))
+ aiotx_mbuf_seen = true;
if (max_nsegs_1mbuf < n)
max_nsegs_1mbuf = n;
sb_sndptr = m; /* new sb->sb_sndptr if all goes well */
@@ -670,9 +731,11 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
else
sowwakeup = 1; /* room available */
}
- if (sowwakeup)
+ if (sowwakeup) {
+ if (!TAILQ_EMPTY(&toep->aiotx_jobq))
+ t4_aiotx_queue_toep(toep);
sowwakeup_locked(so);
- else
+ } else
SOCKBUF_UNLOCK(sb);
SOCKBUF_UNLOCK_ASSERT(sb);
@@ -687,7 +750,7 @@ t4_push_frames(struct adapter *sc, struct toepcb *toep, int drop)
panic("%s: excess tx.", __func__);
shove = m == NULL && !(tp->t_flags & TF_MORETOCOME);
- if (plen <= max_imm) {
+ if (plen <= max_imm && !aiotx_mbuf_seen) {
/* Immediate data tx */
@@ -1616,6 +1679,9 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
}
}
+#ifdef VERBOSE_TRACES
+ CTR3(KTR_CXGBE, "%s: tid %d credits %u", __func__, tid, credits);
+#endif
so = inp->inp_socket;
txsd = &toep->txsd[toep->txsd_cidx];
plen = 0;
@@ -1642,6 +1708,10 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
if (toep->flags & TPF_TX_SUSPENDED &&
toep->tx_credits >= toep->tx_total / 4) {
+#ifdef VERBOSE_TRACES
+ CTR2(KTR_CXGBE, "%s: tid %d calling t4_push_frames", __func__,
+ tid);
+#endif
toep->flags &= ~TPF_TX_SUSPENDED;
if (toep->ulp_mode == ULP_MODE_ISCSI)
t4_push_pdus(sc, toep, plen);
@@ -1668,7 +1738,13 @@ do_fw4_ack(struct sge_iq *iq, const struct rss_header *rss, struct mbuf *m)
sowwakeup_locked(so); /* unlocks so_snd */
rqdrop_locked(&toep->ulp_pdu_reclaimq, plen);
} else {
+#ifdef VERBOSE_TRACES
+ CTR3(KTR_CXGBE, "%s: tid %d dropped %d bytes", __func__,
+ tid, plen);
+#endif
sbdrop_locked(sb, plen);
+ if (!TAILQ_EMPTY(&toep->aiotx_jobq))
+ t4_aiotx_queue_toep(toep);
sowwakeup_locked(so); /* unlocks so_snd */
}
SOCKBUF_UNLOCK_ASSERT(sb);
@@ -1768,4 +1844,397 @@ t4_uninit_cpl_io_handlers(void)
t4_register_cpl_handler(CPL_RX_DATA, do_rx_data);
t4_register_cpl_handler(CPL_FW4_ACK, do_fw4_ack);
}
+
+/*
+ * Use the 'backend3' field in AIO jobs to store the amount of data
+ * sent by the AIO job so far and the 'backend4' field to hold an
+ * error that should be reported when the job is completed.
+ */
+#define aio_sent backend3
+#define aio_error backend4
+
+#define jobtotid(job) \
+ (((struct toepcb *)(so_sototcpcb((job)->fd_file->f_data)->t_toe))->tid)
+
+static void
+free_aiotx_buffer(struct aiotx_buffer *ab)
+{
+ struct kaiocb *job;
+ long status;
+ int error;
+
+ if (refcount_release(&ab->refcount) == 0)
+ return;
+
+ job = ab->job;
+ error = job->aio_error;
+ status = job->aio_sent;
+ vm_page_unhold_pages(ab->ps.pages, ab->ps.npages);
+ free(ab, M_CXGBE);
+#ifdef VERBOSE_TRACES
+ CTR5(KTR_CXGBE, "%s: tid %d completed %p len %ld, error %d", __func__,
+ jobtotid(job), job, status, error);
+#endif
+ if (error == ECANCELED && status != 0)
+ error = 0;
+ if (error == ECANCELED)
+ aio_cancel(job);
+ else if (error)
+ aio_complete(job, -1, error);
+ else
+ aio_complete(job, status, 0);
+}
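/*
 * A minimal userspace sketch of the release pattern above, assuming
 * C11 atomics; the struct and names are illustrative stand-ins, not
 * the kernel refcount(9) API.  Whoever drops the last reference runs
 * the one-time completion, so mbuf free callbacks and the job path
 * can race safely.
 */
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct buf {
	atomic_int refs;		/* job + one per in-flight mbuf */
	void (*done)(void *);		/* one-time completion callback */
	void *arg;
};

static void
report_done(void *arg)
{
	puts(arg);
}

static void
buf_release(struct buf *b)
{
	/* atomic_fetch_sub returns the old value; 1 means we were last */
	if (atomic_fetch_sub(&b->refs, 1) == 1) {
		b->done(b->arg);
		free(b);
	}
}

int
main(void)
{
	struct buf *b = malloc(sizeof(*b));

	atomic_init(&b->refs, 2);	/* job + one in-flight mbuf */
	b->done = report_done;
	b->arg = "completed";
	buf_release(b);			/* mbuf callback: not last */
	buf_release(b);			/* job drop: last, completes */
	return (0);
}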
+
+static void
+t4_aiotx_mbuf_free(struct mbuf *m, void *buffer, void *arg)
+{
+ struct aiotx_buffer *ab = buffer;
+
+#ifdef VERBOSE_TRACES
+ CTR3(KTR_CXGBE, "%s: completed %d bytes for tid %d", __func__,
+ m->m_len, jobtotid(ab->job));
+#endif
+ free_aiotx_buffer(ab);
+}
+
+/*
+ * Hold the buffer backing an AIO request and return an AIO transmit
+ * buffer.
+ */
+static int
+hold_aio(struct kaiocb *job)
+{
+ struct aiotx_buffer *ab;
+ struct vmspace *vm;
+ vm_map_t map;
+ vm_offset_t start, end, pgoff;
+ int n;
+
+ MPASS(job->backend1 == NULL);
+
+ /*
+ * The AIO subsystem will cancel and drain all requests before
+ * permitting a process to exit or exec, so p_vmspace should
+ * be stable here.
+ */
+ vm = job->userproc->p_vmspace;
+ map = &vm->vm_map;
+ start = (uintptr_t)job->uaiocb.aio_buf;
+ pgoff = start & PAGE_MASK;
+ end = round_page(start + job->uaiocb.aio_nbytes);
+ start = trunc_page(start);
+ n = atop(end - start);
+
+ ab = malloc(sizeof(*ab) + n * sizeof(vm_page_t), M_CXGBE, M_WAITOK |
+ M_ZERO);
+ refcount_init(&ab->refcount, 1);
+ ab->ps.pages = (vm_page_t *)(ab + 1);
+ ab->ps.npages = vm_fault_quick_hold_pages(map, start, end - start,
+ VM_PROT_WRITE, ab->ps.pages, n);
+ if (ab->ps.npages < 0) {
+ free(ab, M_CXGBE);
+ return (EFAULT);
+ }
+
+ KASSERT(ab->ps.npages == n,
+ ("hold_aio: page count mismatch: %d vs %d", ab->ps.npages, n));
+
+ ab->ps.offset = pgoff;
+ ab->ps.len = job->uaiocb.aio_nbytes;
+ ab->job = job;
+ job->backend1 = ab;
+#ifdef VERBOSE_TRACES
+ CTR5(KTR_CXGBE, "%s: tid %d, new pageset %p for job %p, npages %d",
+ __func__, jobtotid(job), &ab->ps, job, ab->ps.npages);
+#endif
+ return (0);
+}
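/*
 * The page-rounding arithmetic in hold_aio() above, as a standalone
 * sketch assuming 4 KiB pages; the buffer address and length are made
 * up.  The user span is widened to page boundaries, the page count is
 * taken from the widened span, and the offset into the first page is
 * kept separately.
 */
#include <stdint.h>
#include <stdio.h>

#define PGSZ	4096UL

int
main(void)
{
	uintptr_t buf = 0x7f0000001234UL;	/* hypothetical aio_buf */
	size_t nbytes = 10000;			/* hypothetical aio_nbytes */
	uintptr_t start = buf & ~(PGSZ - 1);			/* trunc_page() */
	uintptr_t end = (buf + nbytes + PGSZ - 1) & ~(PGSZ - 1);/* round_page() */

	printf("pin %lu pages, first-page offset %lu\n",
	    (unsigned long)((end - start) / PGSZ),
	    (unsigned long)(buf & (PGSZ - 1)));
	return (0);
}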
+
+static void
+t4_aiotx_process_job(struct toepcb *toep, struct socket *so, struct kaiocb *job)
+{
+ struct adapter *sc;
+ struct sockbuf *sb;
+ struct file *fp;
+ struct aiotx_buffer *ab;
+ struct inpcb *inp;
+ struct tcpcb *tp;
+ struct mbuf *m;
+ int error;
+ bool moretocome, sendmore;
+
+ sc = td_adapter(toep->td);
+ sb = &so->so_snd;
+ SOCKBUF_UNLOCK(sb);
+ fp = job->fd_file;
+ ab = job->backend1;
+ m = NULL;
+
+#ifdef MAC
+ error = mac_socket_check_send(fp->f_cred, so);
+ if (error != 0)
+ goto out;
+#endif
+
+ if (ab == NULL) {
+ error = hold_aio(job);
+ if (error != 0)
+ goto out;
+ ab = job->backend1;
+ }
+
+ /* Inline sosend_generic(). */
+
+ job->msgsnd = 1;
+
+ error = sblock(sb, SBL_WAIT);
+ MPASS(error == 0);
+
+sendanother:
+ m = m_get(M_WAITOK, MT_DATA);
+
+ SOCKBUF_LOCK(sb);
+ if (so->so_snd.sb_state & SBS_CANTSENDMORE) {
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ if ((so->so_options & SO_NOSIGPIPE) == 0) {
+ PROC_LOCK(job->userproc);
+ kern_psignal(job->userproc, SIGPIPE);
+ PROC_UNLOCK(job->userproc);
+ }
+ error = EPIPE;
+ goto out;
+ }
+ if (so->so_error) {
+ error = so->so_error;
+ so->so_error = 0;
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ goto out;
+ }
+ if ((so->so_state & SS_ISCONNECTED) == 0) {
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ error = ENOTCONN;
+ goto out;
+ }
+ if (sbspace(sb) < sb->sb_lowat) {
+ MPASS(job->aio_sent == 0 || !(so->so_state & SS_NBIO));
+
+ /*
+ * Don't block if there is too little room in the socket
+ * buffer. Instead, requeue the request.
+ */
+ if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ error = ECANCELED;
+ goto out;
+ }
+ TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
+ SOCKBUF_UNLOCK(sb);
+ sbunlock(sb);
+ goto out;
+ }
+
+ /*
+ * Write as much data as the socket permits, but no more than
+ * a single sndbuf at a time.
+ */
+ m->m_len = sbspace(sb);
+ if (m->m_len > ab->ps.len - job->aio_sent) {
+ m->m_len = ab->ps.len - job->aio_sent;
+ moretocome = false;
+ } else
+ moretocome = true;
+ if (m->m_len > sc->tt.sndbuf) {
+ m->m_len = sc->tt.sndbuf;
+ sendmore = true;
+ } else
+ sendmore = false;
+
+ if (!TAILQ_EMPTY(&toep->aiotx_jobq))
+ moretocome = true;
+ SOCKBUF_UNLOCK(sb);
+ MPASS(m->m_len != 0);
+
+ /* Inlined tcp_usr_send(). */
+
+ inp = toep->inp;
+ INP_WLOCK(inp);
+ if (inp->inp_flags & (INP_TIMEWAIT | INP_DROPPED)) {
+ INP_WUNLOCK(inp);
+ sbunlock(sb);
+ error = ECONNRESET;
+ goto out;
+ }
+
+ refcount_acquire(&ab->refcount);
+ m_extadd(m, NULL, ab->ps.len, t4_aiotx_mbuf_free, ab,
+ (void *)(uintptr_t)job->aio_sent, 0, EXT_NET_DRV);
+ m->m_ext.ext_flags |= EXT_FLAG_AIOTX;
+ job->aio_sent += m->m_len;
+
+ sbappendstream(sb, m, 0);
+ m = NULL;
+
+ if (!(inp->inp_flags & INP_DROPPED)) {
+ tp = intotcpcb(inp);
+ if (moretocome)
+ tp->t_flags |= TF_MORETOCOME;
+ error = tp->t_fb->tfb_tcp_output(tp);
+ if (moretocome)
+ tp->t_flags &= ~TF_MORETOCOME;
+ }
+
+ INP_WUNLOCK(inp);
+ if (sendmore)
+ goto sendanother;
+ sbunlock(sb);
+
+ if (error)
+ goto out;
+
+ /*
+ * If this is a blocking socket and the request has not
+ * been fully completed, requeue it until the socket is ready
+ * again.
+ */
+ if (job->aio_sent < job->uaiocb.aio_nbytes &&
+ !(so->so_state & SS_NBIO)) {
+ SOCKBUF_LOCK(sb);
+ if (!aio_set_cancel_function(job, t4_aiotx_cancel)) {
+ SOCKBUF_UNLOCK(sb);
+ error = ECANCELED;
+ goto out;
+ }
+ TAILQ_INSERT_HEAD(&toep->aiotx_jobq, job, list);
+ return;
+ }
+
+ /*
+ * If the request will not be requeued, drop a reference on
+ * the aiotx buffer. Any mbufs in flight should still
+ * contain a reference, but this drops the reference that the
+ * job owns while it is waiting to queue mbufs to the socket.
+ */
+ free_aiotx_buffer(ab);
+
+out:
+ if (error) {
+ if (ab != NULL) {
+ job->aio_error = error;
+ free_aiotx_buffer(ab);
+ } else {
+ MPASS(job->aio_sent == 0);
+ aio_complete(job, -1, error);
+ }
+ }
+ if (m != NULL)
+ m_free(m);
+ SOCKBUF_LOCK(sb);
+}
+
+static void
+t4_aiotx_task(void *context, int pending)
+{
+ struct toepcb *toep = context;
+ struct inpcb *inp = toep->inp;
+ struct socket *so = inp->inp_socket;
+ struct kaiocb *job;
+
+ CURVNET_SET(so->so_vnet);
+ SOCKBUF_LOCK(&so->so_snd);
+ while (!TAILQ_EMPTY(&toep->aiotx_jobq) && sowriteable(so)) {
+ job = TAILQ_FIRST(&toep->aiotx_jobq);
+ TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
+ if (!aio_clear_cancel_function(job))
+ continue;
+
+ t4_aiotx_process_job(toep, so, job);
+ }
+ toep->aiotx_task_active = false;
+ SOCKBUF_UNLOCK(&so->so_snd);
+ CURVNET_RESTORE();
+
+ free_toepcb(toep);
+}
+
+static void
+t4_aiotx_queue_toep(struct toepcb *toep)
+{
+
+ SOCKBUF_LOCK_ASSERT(&toep->inp->inp_socket->so_snd);
+#ifdef VERBOSE_TRACES
+ CTR3(KTR_CXGBE, "%s: queueing aiotx task for tid %d, active = %s",
+ __func__, toep->tid, toep->aiotx_task_active ? "true" : "false");
+#endif
+ if (toep->aiotx_task_active)
+ return;
+ toep->aiotx_task_active = true;
+ hold_toepcb(toep);
+ soaio_enqueue(&toep->aiotx_task);
+}
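/*
 * A compilable sketch of the "at most one task in flight" guard above.
 * The kernel version tests and sets aiotx_task_active under the socket
 * buffer lock; an atomic exchange gives the same effect here.  Names
 * are illustrative.
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool task_active;

static void
queue_task(void)
{
	/* Only the caller that flips false -> true enqueues the task. */
	if (atomic_exchange(&task_active, true))
		return;
	printf("task enqueued\n");	/* stand-in for soaio_enqueue() */
}

int
main(void)
{
	queue_task();
	queue_task();	/* no-op: a task is already pending */
	return (0);
}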
+
+static void
+t4_aiotx_cancel(struct kaiocb *job)
+{
+ struct aiotx_buffer *ab;
+ struct socket *so;
+ struct sockbuf *sb;
+ struct tcpcb *tp;
+ struct toepcb *toep;
+
+ so = job->fd_file->f_data;
+ tp = so_sototcpcb(so);
+ toep = tp->t_toe;
+ MPASS(job->uaiocb.aio_lio_opcode == LIO_WRITE);
+ sb = &so->so_snd;
+
+ SOCKBUF_LOCK(sb);
+ if (!aio_cancel_cleared(job))
+ TAILQ_REMOVE(&toep->aiotx_jobq, job, list);
+ SOCKBUF_UNLOCK(sb);
+
+ ab = job->backend1;
+ if (ab != NULL)
+ free_aiotx_buffer(ab);
+ else
+ aio_cancel(job);
+}
+
+int
+t4_aio_queue_aiotx(struct socket *so, struct kaiocb *job)
+{
+ struct tcpcb *tp = so_sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+ struct adapter *sc = td_adapter(toep->td);
+
+ /* This only handles writes. */
+ if (job->uaiocb.aio_lio_opcode != LIO_WRITE)
+ return (EOPNOTSUPP);
+
+ if (!sc->tt.tx_zcopy)
+ return (EOPNOTSUPP);
+
+ SOCKBUF_LOCK(&so->so_snd);
+#ifdef VERBOSE_TRACES
+ CTR2(KTR_CXGBE, "%s: queueing %p", __func__, job);
+#endif
+ if (!aio_set_cancel_function(job, t4_aiotx_cancel))
+ panic("new job was cancelled");
+ TAILQ_INSERT_TAIL(&toep->aiotx_jobq, job, list);
+ if (sowriteable(so))
+ t4_aiotx_queue_toep(toep);
+ SOCKBUF_UNLOCK(&so->so_snd);
+ return (0);
+}
+
+void
+aiotx_init_toep(struct toepcb *toep)
+{
+
+ TAILQ_INIT(&toep->aiotx_jobq);
+ TASK_INIT(&toep->aiotx_task, 0, t4_aiotx_task, toep);
+}
#endif
diff --git a/sys/dev/cxgbe/tom/t4_ddp.c b/sys/dev/cxgbe/tom/t4_ddp.c
index ff73c8bb3cad..d70fb5911d39 100644
--- a/sys/dev/cxgbe/tom/t4_ddp.c
+++ b/sys/dev/cxgbe/tom/t4_ddp.c
@@ -88,9 +88,6 @@ static void t4_aio_cancel_queued(struct kaiocb *job);
#define PPOD_SZ(n) ((n) * sizeof(struct pagepod))
#define PPOD_SIZE (PPOD_SZ(1))
-/* XXX: must match A_ULP_RX_TDDP_PSZ */
-static int t4_ddp_pgsz[] = {4096, 4096 << 2, 4096 << 4, 4096 << 6};
-
static TAILQ_HEAD(, pageset) ddp_orphan_pagesets;
static struct mtx ddp_orphan_pagesets_lock;
static struct task ddp_orphan_task;
@@ -908,13 +905,13 @@ alloc_page_pods(struct tom_data *td, struct pageset *ps)
}
hcf = calculate_hcf(hcf, seglen);
- if (hcf < t4_ddp_pgsz[1]) {
+ if (hcf < td->ddp_pgsz[1]) {
idx = 0;
goto have_pgsz; /* give up, short circuit */
}
}
- if (hcf % t4_ddp_pgsz[0] != 0) {
+ if (hcf % td->ddp_pgsz[0] != 0) {
/* hmmm. This could only happen when PAGE_SIZE < 4K */
KASSERT(PAGE_SIZE < 4096,
("%s: PAGE_SIZE %d, hcf %d", __func__, PAGE_SIZE, hcf));
@@ -923,17 +920,17 @@ alloc_page_pods(struct tom_data *td, struct pageset *ps)
return (0);
}
- for (idx = nitems(t4_ddp_pgsz) - 1; idx > 0; idx--) {
- if (hcf % t4_ddp_pgsz[idx] == 0)
+ for (idx = nitems(td->ddp_pgsz) - 1; idx > 0; idx--) {
+ if (hcf % td->ddp_pgsz[idx] == 0)
break;
}
have_pgsz:
MPASS(idx <= M_PPOD_PGSZ);
- nppods = pages_to_nppods(ps->npages, t4_ddp_pgsz[idx]);
+ nppods = pages_to_nppods(ps->npages, td->ddp_pgsz[idx]);
if (alloc_ppods(td, nppods, &ppod_addr) != 0) {
CTR4(KTR_CXGBE, "%s: no pods, nppods %d, npages %d, pgsz %d",
- __func__, nppods, ps->npages, t4_ddp_pgsz[idx]);
+ __func__, nppods, ps->npages, td->ddp_pgsz[idx]);
return (0);
}
@@ -944,7 +941,7 @@ have_pgsz:
CTR5(KTR_CXGBE, "New page pods. "
"ps %p, ddp_pgsz %d, ppod 0x%x, npages %d, nppods %d",
- ps, t4_ddp_pgsz[idx], ppod, ps->npages, ps->nppods);
+ ps, td->ddp_pgsz[idx], ppod, ps->npages, ps->nppods);
return (1);
}
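/*
 * The size selection above, reduced to its core: take the greatest
 * common divisor (the "hcf") of all segment lengths, then walk the
 * configured DDP page sizes from largest to smallest and keep the
 * first one that divides it.  Standalone sketch with made-up sizes.
 */
#include <stdio.h>

static unsigned
gcd(unsigned a, unsigned b)
{
	while (b != 0) {
		unsigned t = a % b;

		a = b;
		b = t;
	}
	return (a);
}

int
main(void)
{
	unsigned pgsz[4] = { 4096, 16384, 65536, 262144 };
	unsigned seglen[3] = { 65536, 131072, 65536 };
	unsigned hcf = seglen[0];
	int i, idx;

	for (i = 1; i < 3; i++)
		hcf = gcd(hcf, seglen[i]);
	for (idx = 3; idx > 0; idx--)
		if (hcf % pgsz[idx] == 0)
			break;
	printf("hcf %u -> DDP page size %u\n", hcf, pgsz[idx]);
	return (0);
}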
@@ -958,6 +955,7 @@ write_page_pods(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
struct ulp_mem_io *ulpmc;
struct ulptx_idata *ulpsc;
struct pagepod *ppod;
+ struct tom_data *td = sc->tom_softc;
int i, j, k, n, chunk, len, ddp_pgsz, idx;
u_int ppod_addr;
uint32_t cmd;
@@ -970,7 +968,7 @@ write_page_pods(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
cmd |= htobe32(F_ULP_MEMIO_ORDER);
else
cmd |= htobe32(F_T5_ULP_MEMIO_IMM);
- ddp_pgsz = t4_ddp_pgsz[G_PPOD_PGSZ(ps->tag)];
+ ddp_pgsz = td->ddp_pgsz[G_PPOD_PGSZ(ps->tag)];
ppod_addr = ps->ppod_addr;
for (i = 0; i < ps->nppods; ppod_addr += chunk) {
@@ -1069,10 +1067,27 @@ prep_pageset(struct adapter *sc, struct toepcb *toep, struct pageset *ps)
void
t4_init_ddp(struct adapter *sc, struct tom_data *td)
{
+ int i;
+ uint32_t r;
+
+ r = t4_read_reg(sc, A_ULP_RX_TDDP_PSZ);
+ td->ddp_pgsz[0] = 4096 << G_HPZ0(r);
+ td->ddp_pgsz[1] = 4096 << G_HPZ1(r);
+ td->ddp_pgsz[2] = 4096 << G_HPZ2(r);
+ td->ddp_pgsz[3] = 4096 << G_HPZ3(r);
+
+ /*
+ * The SGL -> page pod algorithm requires the sizes to be in increasing
+ * order.
+ */
+ for (i = 1; i < nitems(td->ddp_pgsz); i++) {
+ if (td->ddp_pgsz[i] <= td->ddp_pgsz[i - 1])
+ return;
+ }
td->ppod_start = sc->vres.ddp.start;
td->ppod_arena = vmem_create("DDP page pods", sc->vres.ddp.start,
- sc->vres.ddp.size, 1, 32, M_FIRSTFIT | M_NOWAIT);
+ sc->vres.ddp.size, PPOD_SIZE, 512, M_FIRSTFIT | M_NOWAIT);
}
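/*
 * A sketch of the A_ULP_RX_TDDP_PSZ decode above, assuming each HPZ
 * field is a small shift applied to a 4 KiB base (the register layout
 * below is made up; only the 4096 << shift relationship mirrors the
 * code).  The old static table {4K, 16K, 64K, 256K} corresponds to
 * shifts 0, 2, 4 and 6.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	uint32_t r = (6 << 12) | (4 << 8) | (2 << 4) | 0; /* hypothetical */
	int i;

	for (i = 0; i < 4; i++)
		printf("ddp_pgsz[%d] = %u\n", i,
		    4096u << ((r >> (i * 4)) & 0xf));
	return (0);
}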
void
diff --git a/sys/dev/cxgbe/tom/t4_tom.c b/sys/dev/cxgbe/tom/t4_tom.c
index 2a8082e92865..56006d0ac891 100644
--- a/sys/dev/cxgbe/tom/t4_tom.c
+++ b/sys/dev/cxgbe/tom/t4_tom.c
@@ -68,11 +68,11 @@ __FBSDID("$FreeBSD$");
#include "tom/t4_tom_l2t.h"
#include "tom/t4_tom.h"
-static struct protosw ddp_protosw;
-static struct pr_usrreqs ddp_usrreqs;
+static struct protosw toe_protosw;
+static struct pr_usrreqs toe_usrreqs;
-static struct protosw ddp6_protosw;
-static struct pr_usrreqs ddp6_usrreqs;
+static struct protosw toe6_protosw;
+static struct pr_usrreqs toe6_usrreqs;
/* Module ops */
static int t4_tom_mod_load(void);
@@ -167,6 +167,7 @@ alloc_toepcb(struct vi_info *vi, int txqid, int rxqid, int flags)
toep->txsd_avail = txsd_total;
toep->txsd_pidx = 0;
toep->txsd_cidx = 0;
+ aiotx_init_toep(toep);
ddp_init_toep(toep);
return (toep);
@@ -217,12 +218,10 @@ offload_socket(struct socket *so, struct toepcb *toep)
sb = &so->so_rcv;
SOCKBUF_LOCK(sb);
sb->sb_flags |= SB_NOCOALESCE;
- if (toep->ulp_mode == ULP_MODE_TCPDDP) {
- if (inp->inp_vflag & INP_IPV6)
- so->so_proto = &ddp6_protosw;
- else
- so->so_proto = &ddp_protosw;
- }
+ if (inp->inp_vflag & INP_IPV6)
+ so->so_proto = &toe6_protosw;
+ else
+ so->so_proto = &toe_protosw;
SOCKBUF_UNLOCK(sb);
/* Update TCP PCB */
@@ -1120,6 +1119,22 @@ t4_tom_ifaddr_event(void *arg __unused, struct ifnet *ifp)
}
static int
+t4_aio_queue_tom(struct socket *so, struct kaiocb *job)
+{
+ struct tcpcb *tp = so_sototcpcb(so);
+ struct toepcb *toep = tp->t_toe;
+ int error;
+
+ if (toep->ulp_mode == ULP_MODE_TCPDDP) {
+ error = t4_aio_queue_ddp(so, job);
+ if (error != EOPNOTSUPP)
+ return (error);
+ }
+
+ return (t4_aio_queue_aiotx(so, job));
+}
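/*
 * The dispatch pattern above, in miniature: try the specialized DDP
 * queue first and fall back to the generic aiotx path only when it
 * answers "not supported".  Stand-in functions, illustrative only.
 */
#include <errno.h>
#include <stdio.h>

static int queue_ddp(void) { return (EOPNOTSUPP); }	/* e.g. a write */
static int queue_aiotx(void) { return (0); }

static int
queue_tom(int ddp_mode)
{
	int error;

	if (ddp_mode) {
		error = queue_ddp();
		if (error != EOPNOTSUPP)
			return (error);
	}
	return (queue_aiotx());
}

int
main(void)
{
	printf("error = %d\n", queue_tom(1));
	return (0);
}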
+
+static int
t4_tom_mod_load(void)
{
int rc;
@@ -1137,18 +1152,18 @@ t4_tom_mod_load(void)
tcp_protosw = pffindproto(PF_INET, IPPROTO_TCP, SOCK_STREAM);
if (tcp_protosw == NULL)
return (ENOPROTOOPT);
- bcopy(tcp_protosw, &ddp_protosw, sizeof(ddp_protosw));
- bcopy(tcp_protosw->pr_usrreqs, &ddp_usrreqs, sizeof(ddp_usrreqs));
- ddp_usrreqs.pru_aio_queue = t4_aio_queue_ddp;
- ddp_protosw.pr_usrreqs = &ddp_usrreqs;
+ bcopy(tcp_protosw, &toe_protosw, sizeof(toe_protosw));
+ bcopy(tcp_protosw->pr_usrreqs, &toe_usrreqs, sizeof(toe_usrreqs));
+ toe_usrreqs.pru_aio_queue = t4_aio_queue_tom;
+ toe_protosw.pr_usrreqs = &toe_usrreqs;
tcp6_protosw = pffindproto(PF_INET6, IPPROTO_TCP, SOCK_STREAM);
if (tcp6_protosw == NULL)
return (ENOPROTOOPT);
- bcopy(tcp6_protosw, &ddp6_protosw, sizeof(ddp6_protosw));
- bcopy(tcp6_protosw->pr_usrreqs, &ddp6_usrreqs, sizeof(ddp6_usrreqs));
- ddp6_usrreqs.pru_aio_queue = t4_aio_queue_ddp;
- ddp6_protosw.pr_usrreqs = &ddp6_usrreqs;
+ bcopy(tcp6_protosw, &toe6_protosw, sizeof(toe6_protosw));
+ bcopy(tcp6_protosw->pr_usrreqs, &toe6_usrreqs, sizeof(toe6_usrreqs));
+ toe6_usrreqs.pru_aio_queue = t4_aio_queue_tom;
+ toe6_protosw.pr_usrreqs = &toe6_usrreqs;
TIMEOUT_TASK_INIT(taskqueue_thread, &clip_task, 0, t4_clip_task, NULL);
ifaddr_evhandler = EVENTHANDLER_REGISTER(ifaddr_event,
diff --git a/sys/dev/cxgbe/tom/t4_tom.h b/sys/dev/cxgbe/tom/t4_tom.h
index f114a3dfb429..313fb7b510d6 100644
--- a/sys/dev/cxgbe/tom/t4_tom.h
+++ b/sys/dev/cxgbe/tom/t4_tom.h
@@ -102,6 +102,8 @@ TAILQ_HEAD(pagesetq, pageset);
#define PS_WIRED 0x0001 /* Pages wired rather than held. */
#define PS_PPODS_WRITTEN 0x0002 /* Page pods written to the card. */
+#define EXT_FLAG_AIOTX EXT_FLAG_VENDOR1
+
struct ddp_buffer {
struct pageset *ps;
@@ -109,6 +111,12 @@ struct ddp_buffer {
int cancel_pending;
};
+struct aiotx_buffer {
+ struct pageset ps;
+ struct kaiocb *job;
+ int refcount;
+};
+
struct toepcb {
TAILQ_ENTRY(toepcb) link; /* toep_list */
u_int flags; /* miscellaneous flags */
@@ -151,6 +159,10 @@ struct toepcb {
struct kaiocb *ddp_queueing;
struct mtx ddp_lock;
+ TAILQ_HEAD(, kaiocb) aiotx_jobq;
+ struct task aiotx_task;
+ bool aiotx_task_active;
+
/* Tx software descriptor */
uint8_t txsd_total;
uint8_t txsd_pidx;
@@ -228,6 +240,7 @@ struct tom_data {
int lctx_count; /* # of lctx in the hash table */
u_int ppod_start;
+ u_int ddp_pgsz[4];
vmem_t *ppod_arena;
struct mtx clip_table_lock;
@@ -313,6 +326,8 @@ int do_abort_rpl_synqe(struct sge_iq *, const struct rss_header *,
void t4_offload_socket(struct toedev *, void *, struct socket *);
/* t4_cpl_io.c */
+void aiotx_init_toep(struct toepcb *);
+int t4_aio_queue_aiotx(struct socket *, struct kaiocb *);
void t4_init_cpl_io_handlers(void);
void t4_uninit_cpl_io_handlers(void);
void send_abort_rpl(struct adapter *, struct sge_wrq *, int , int);
diff --git a/sys/dev/e1000/e1000_api.c b/sys/dev/e1000/e1000_api.c
index 28379cc572d3..52e260950d24 100644
--- a/sys/dev/e1000/e1000_api.c
+++ b/sys/dev/e1000/e1000_api.c
@@ -304,6 +304,10 @@ s32 e1000_set_mac_type(struct e1000_hw *hw)
case E1000_DEV_ID_PCH_SPT_I219_LM2:
case E1000_DEV_ID_PCH_SPT_I219_V2:
case E1000_DEV_ID_PCH_LBG_I219_LM3:
+ case E1000_DEV_ID_PCH_SPT_I219_LM4:
+ case E1000_DEV_ID_PCH_SPT_I219_V4:
+ case E1000_DEV_ID_PCH_SPT_I219_LM5:
+ case E1000_DEV_ID_PCH_SPT_I219_V5:
mac->type = e1000_pch_spt;
break;
case E1000_DEV_ID_82575EB_COPPER:
diff --git a/sys/dev/e1000/e1000_hw.h b/sys/dev/e1000/e1000_hw.h
index 1792e14ef38a..e1464a7b655a 100644
--- a/sys/dev/e1000/e1000_hw.h
+++ b/sys/dev/e1000/e1000_hw.h
@@ -142,6 +142,10 @@ struct e1000_hw;
#define E1000_DEV_ID_PCH_SPT_I219_LM2 0x15B7 /* Sunrise Point-H PCH */
#define E1000_DEV_ID_PCH_SPT_I219_V2 0x15B8 /* Sunrise Point-H PCH */
#define E1000_DEV_ID_PCH_LBG_I219_LM3 0x15B9 /* LEWISBURG PCH */
+#define E1000_DEV_ID_PCH_SPT_I219_LM4 0x15D7
+#define E1000_DEV_ID_PCH_SPT_I219_V4 0x15D8
+#define E1000_DEV_ID_PCH_SPT_I219_LM5 0x15E3
+#define E1000_DEV_ID_PCH_SPT_I219_V5 0x15D6
#define E1000_DEV_ID_82576 0x10C9
#define E1000_DEV_ID_82576_FIBER 0x10E6
#define E1000_DEV_ID_82576_SERDES 0x10E7
@@ -957,9 +961,13 @@ struct e1000_dev_spec_ich8lan {
E1000_MUTEX nvm_mutex;
E1000_MUTEX swflag_mutex;
bool nvm_k1_enabled;
+ bool disable_k1_off;
bool eee_disable;
u16 eee_lp_ability;
enum e1000_ulp_state ulp_state;
+ bool ulp_capability_disabled;
+ bool during_suspend_flow;
+ bool during_dpg_exit;
};
struct e1000_dev_spec_82575 {
diff --git a/sys/dev/e1000/e1000_ich8lan.c b/sys/dev/e1000/e1000_ich8lan.c
index ae97a8c0d389..4c50ce296385 100644
--- a/sys/dev/e1000/e1000_ich8lan.c
+++ b/sys/dev/e1000/e1000_ich8lan.c
@@ -288,7 +288,7 @@ static void e1000_toggle_lanphypc_pch_lpt(struct e1000_hw *hw)
mac_reg &= ~E1000_CTRL_LANPHYPC_VALUE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
- usec_delay(10);
+ msec_delay(1);
mac_reg &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
E1000_WRITE_REG(hw, E1000_CTRL, mac_reg);
E1000_WRITE_FLUSH(hw);
@@ -1625,7 +1625,17 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
hw->phy.ops.write_reg_locked(hw,
I217_PLL_CLOCK_GATE_REG,
phy_reg);
- }
+
+ if (speed == SPEED_1000) {
+ hw->phy.ops.read_reg_locked(hw, HV_PM_CTRL,
+ &phy_reg);
+
+ phy_reg |= HV_PM_CTRL_K1_CLK_REQ;
+
+ hw->phy.ops.write_reg_locked(hw, HV_PM_CTRL,
+ phy_reg);
+ }
+ }
hw->phy.ops.release(hw);
if (ret_val)
@@ -1718,7 +1728,8 @@ static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
u32 pcieanacfg = E1000_READ_REG(hw, E1000_PCIEANACFG);
u32 fextnvm6 = E1000_READ_REG(hw, E1000_FEXTNVM6);
- if (pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE)
+ if ((pcieanacfg & E1000_FEXTNVM6_K1_OFF_ENABLE) &&
+ (hw->dev_spec.ich8lan.disable_k1_off == FALSE))
fextnvm6 |= E1000_FEXTNVM6_K1_OFF_ENABLE;
else
fextnvm6 &= ~E1000_FEXTNVM6_K1_OFF_ENABLE;
diff --git a/sys/dev/e1000/e1000_ich8lan.h b/sys/dev/e1000/e1000_ich8lan.h
index edc1dd14ccc9..6d812911e8b8 100644
--- a/sys/dev/e1000/e1000_ich8lan.h
+++ b/sys/dev/e1000/e1000_ich8lan.h
@@ -239,7 +239,7 @@
/* PHY Power Management Control */
#define HV_PM_CTRL PHY_REG(770, 17)
-#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA 0x100
+#define HV_PM_CTRL_K1_CLK_REQ 0x200
#define HV_PM_CTRL_K1_ENABLE 0x4000
#define I217_PLL_CLOCK_GATE_REG PHY_REG(772, 28)
diff --git a/sys/dev/e1000/e1000_osdep.h b/sys/dev/e1000/e1000_osdep.h
index c0bf4182ec62..c7c23e582ca9 100644
--- a/sys/dev/e1000/e1000_osdep.h
+++ b/sys/dev/e1000/e1000_osdep.h
@@ -134,7 +134,7 @@ struct e1000_osdep
bus_space_handle_t io_bus_space_handle;
bus_space_tag_t flash_bus_space_tag;
bus_space_handle_t flash_bus_space_handle;
- struct device *dev;
+ device_t dev;
};
#define E1000_REGISTER(hw, reg) (((hw)->mac.type >= e1000_82543) \
diff --git a/sys/dev/e1000/e1000_phy.c b/sys/dev/e1000/e1000_phy.c
index 847d3155e2af..9684b43f5503 100644
--- a/sys/dev/e1000/e1000_phy.c
+++ b/sys/dev/e1000/e1000_phy.c
@@ -4146,12 +4146,13 @@ s32 e1000_read_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 *data)
*data = E1000_READ_REG(hw, E1000_MPHY_DATA);
/* Disable access to mPHY if it was originally disabled */
- if (locked) {
+ if (locked)
ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- }
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, E1000_MPHY_DIS_ACCESS);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+
return E1000_SUCCESS;
}
@@ -4210,12 +4211,13 @@ s32 e1000_write_phy_reg_mphy(struct e1000_hw *hw, u32 address, u32 data,
E1000_WRITE_REG(hw, E1000_MPHY_DATA, data);
/* Disable access to mPHY if it was originally disabled */
- if (locked) {
+ if (locked)
ready = e1000_is_mphy_ready(hw);
- if (!ready)
- return -E1000_ERR_PHY;
- }
- E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL, E1000_MPHY_DIS_ACCESS);
+ if (!ready)
+ return -E1000_ERR_PHY;
+ E1000_WRITE_REG(hw, E1000_MPHY_ADDR_CTRL,
+ E1000_MPHY_DIS_ACCESS);
+
return E1000_SUCCESS;
}
diff --git a/sys/dev/e1000/if_em.c b/sys/dev/e1000/if_em.c
index ddd651d82d18..7e2690eae084 100644
--- a/sys/dev/e1000/if_em.c
+++ b/sys/dev/e1000/if_em.c
@@ -193,6 +193,12 @@ static em_vendor_info_t em_vendor_info_array[] =
{ 0x8086, E1000_DEV_ID_PCH_SPT_I219_V2, PCI_ANY_ID, PCI_ANY_ID, 0},
{ 0x8086, E1000_DEV_ID_PCH_LBG_I219_LM3,
PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM4,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V4, PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_SPT_I219_LM5,
+ PCI_ANY_ID, PCI_ANY_ID, 0},
+ { 0x8086, E1000_DEV_ID_PCH_SPT_I219_V5, PCI_ANY_ID, PCI_ANY_ID, 0},
/* required last entry */
{ 0, 0, 0, 0, 0}
};
@@ -4392,6 +4398,7 @@ em_setup_receive_ring(struct rx_ring *rxr)
addr = PNMB(na, slot + si, &paddr);
netmap_load_map(na, rxr->rxtag, rxbuf->map, addr);
+ rxbuf->paddr = paddr;
em_setup_rxdesc(&rxr->rx_base[j], rxbuf);
continue;
}
diff --git a/sys/dev/e1000/if_em.h b/sys/dev/e1000/if_em.h
index c8c98cde6c02..2a2bf2ccb7ab 100644
--- a/sys/dev/e1000/if_em.h
+++ b/sys/dev/e1000/if_em.h
@@ -394,7 +394,7 @@ struct adapter {
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
- struct device *dev;
+ device_t dev;
struct cdev *led_dev;
struct resource *memory;
diff --git a/sys/dev/e1000/if_igb.h b/sys/dev/e1000/if_igb.h
index a30fc993230c..e2f2219cc94c 100644
--- a/sys/dev/e1000/if_igb.h
+++ b/sys/dev/e1000/if_igb.h
@@ -429,7 +429,7 @@ struct adapter {
struct e1000_hw hw;
struct e1000_osdep osdep;
- struct device *dev;
+ device_t dev;
struct cdev *led_dev;
struct resource *pci_mem;
diff --git a/sys/dev/e1000/if_lem.h b/sys/dev/e1000/if_lem.h
index 2bdc21a2c335..4a27c34bd772 100644
--- a/sys/dev/e1000/if_lem.h
+++ b/sys/dev/e1000/if_lem.h
@@ -298,7 +298,7 @@ struct adapter {
/* FreeBSD operating-system-specific structures. */
struct e1000_osdep osdep;
- struct device *dev;
+ device_t dev;
struct cdev *led_dev;
struct resource *memory;
diff --git a/sys/dev/etherswitch/arswitch/arswitch.c b/sys/dev/etherswitch/arswitch/arswitch.c
index 8396f8aee501..09055632da0e 100644
--- a/sys/dev/etherswitch/arswitch/arswitch.c
+++ b/sys/dev/etherswitch/arswitch/arswitch.c
@@ -73,9 +73,13 @@
#include "miibus_if.h"
#include "etherswitch_if.h"
-#if defined(DEBUG)
-static SYSCTL_NODE(_debug, OID_AUTO, arswitch, CTLFLAG_RD, 0, "arswitch");
-#endif
+/* Map ETHERSWITCH_PORT_LED_* to Atheros pattern codes */
+static int led_pattern_table[] = {
+ [ETHERSWITCH_PORT_LED_DEFAULT] = 0x3,
+ [ETHERSWITCH_PORT_LED_ON] = 0x2,
+ [ETHERSWITCH_PORT_LED_OFF] = 0x0,
+ [ETHERSWITCH_PORT_LED_BLINK] = 0x1
+};
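/*
 * arswitch_getport() later recovers the enum from a raw 2-bit hardware
 * pattern by linear search over this table.  A standalone sketch of
 * that reverse mapping; the values match the table above, the rest is
 * made up.
 */
#include <stdio.h>

static const int pattern[] = { 0x3, 0x2, 0x0, 0x1 };	/* enum -> code */

static int
code_to_style(int code)
{
	int style;

	for (style = 0; style < 4; style++)
		if (pattern[style] == code)
			return (style);
	return (0);		/* fall back to "default" */
}

int
main(void)
{
	printf("code 0x1 -> style %d\n", code_to_style(0x1));
	return (0);
}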
static inline int arswitch_portforphy(int phy);
static void arswitch_tick(void *arg);
@@ -85,6 +89,8 @@ static int ar8xxx_port_vlan_setup(struct arswitch_softc *sc,
etherswitch_port_t *p);
static int ar8xxx_port_vlan_get(struct arswitch_softc *sc,
etherswitch_port_t *p);
+static int arswitch_setled(struct arswitch_softc *sc, int phy, int led,
+ int style);
static int
arswitch_probe(device_t dev)
@@ -146,7 +152,7 @@ arswitch_probe(device_t dev)
done:
- DPRINTF(dev, "chipname=%s, id=%08x\n", chipname, id);
+ DPRINTF(sc, ARSWITCH_DBG_ANY, "chipname=%s, id=%08x\n", chipname, id);
if (chipname != NULL) {
snprintf(desc, sizeof(desc),
"Atheros %s Ethernet Switch (ver %d rev %d)",
@@ -188,9 +194,23 @@ arswitch_attach_phys(struct arswitch_softc *sc)
device_printf(sc->sc_dev,
"attaching PHY %d failed\n",
phy);
+ return (err);
+ }
+
+ if (AR8X16_IS_SWITCH(sc, AR8327)) {
+ int led;
+ char ledname[IFNAMSIZ + 4];
+
+ for (led = 0; led < 3; led++) {
+ sprintf(ledname, "%s%dled%d", name,
+ arswitch_portforphy(phy), led + 1);
+ sc->dev_led[phy][led].sc = sc;
+ sc->dev_led[phy][led].phy = phy;
+ sc->dev_led[phy][led].lednum = led;
+ }
}
}
- return (err);
+ return (0);
}
static int
@@ -285,12 +305,12 @@ ar8xxx_atu_flush(struct arswitch_softc *sc)
static int
arswitch_attach(device_t dev)
{
- struct arswitch_softc *sc;
+ struct arswitch_softc *sc = device_get_softc(dev);
+ struct sysctl_ctx_list *ctx;
+ struct sysctl_oid *tree;
int err = 0;
int port;
- sc = device_get_softc(dev);
-
/* sc->sc_switchtype is already decided in arswitch_probe() */
sc->sc_dev = dev;
mtx_init(&sc->sc_mtx, "arswitch", NULL, MTX_DEF);
@@ -298,6 +318,13 @@ arswitch_attach(device_t dev)
strlcpy(sc->info.es_name, device_get_desc(dev),
sizeof(sc->info.es_name));
+ /* Debugging */
+ ctx = device_get_sysctl_ctx(sc->sc_dev);
+ tree = device_get_sysctl_tree(sc->sc_dev);
+ SYSCTL_ADD_UINT(ctx, SYSCTL_CHILDREN(tree), OID_AUTO,
+ "debug", CTLFLAG_RW, &sc->sc_debug, 0,
+ "control debugging printfs");
+
/* Default HAL methods */
sc->hal.arswitch_port_init = ar8xxx_port_init;
sc->hal.arswitch_port_vlan_setup = ar8xxx_port_vlan_setup;
@@ -339,7 +366,8 @@ arswitch_attach(device_t dev)
else if (AR8X16_IS_SWITCH(sc, AR8327))
ar8327_attach(sc);
else {
- DPRINTF(dev, "%s: unknown switch (%d)?\n", __func__, sc->sc_switchtype);
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: unknown switch (%d)?\n", __func__, sc->sc_switchtype);
return (ENXIO);
}
@@ -369,19 +397,24 @@ arswitch_attach(device_t dev)
/* Reset the switch. */
if (arswitch_reset(dev)) {
- DPRINTF(dev, "%s: arswitch_reset: failed\n", __func__);
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: arswitch_reset: failed\n", __func__);
return (ENXIO);
}
err = sc->hal.arswitch_hw_setup(sc);
- DPRINTF(dev, "%s: hw_setup: err=%d\n", __func__, err);
- if (err != 0)
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: hw_setup: err=%d\n", __func__, err);
return (err);
+ }
err = sc->hal.arswitch_hw_global_setup(sc);
- DPRINTF(dev, "%s: hw_global_setup: err=%d\n", __func__, err);
- if (err != 0)
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: hw_global_setup: err=%d\n", __func__, err);
return (err);
+ }
/* Initialize the switch ports. */
for (port = 0; port <= sc->numphys; port++) {
@@ -392,22 +425,28 @@ arswitch_attach(device_t dev)
* Attach the PHYs and complete the bus enumeration.
*/
err = arswitch_attach_phys(sc);
- DPRINTF(dev, "%s: attach_phys: err=%d\n", __func__, err);
- if (err != 0)
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: attach_phys: err=%d\n", __func__, err);
return (err);
+ }
/* Default to ingress filters off. */
err = arswitch_set_vlan_mode(sc, 0);
- DPRINTF(dev, "%s: set_vlan_mode: err=%d\n", __func__, err);
- if (err != 0)
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: set_vlan_mode: err=%d\n", __func__, err);
return (err);
+ }
bus_generic_probe(dev);
bus_enumerate_hinted_children(dev);
err = bus_generic_attach(dev);
- DPRINTF(dev, "%s: bus_generic_attach: err=%d\n", __func__, err);
- if (err != 0)
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: bus_generic_attach: err=%d\n", __func__, err);
return (err);
+ }
callout_init_mtx(&sc->callout_tick, &sc->sc_mtx, 0);
@@ -536,10 +575,11 @@ arswitch_miipollstat(struct arswitch_softc *sc)
else
portstatus = arswitch_readreg(sc->sc_dev,
AR8X16_REG_PORT_STS(arswitch_portforphy(i)));
-#if 0
- DPRINTF(sc->sc_dev, "p[%d]=%b\n",
+#if 1
+ DPRINTF(sc, ARSWITCH_DBG_POLL, "p[%d]=0x%08x (%b)\n",
i,
portstatus,
+ portstatus,
"\20\3TXMAC\4RXMAC\5TXFLOW\6RXFLOW\7"
"DUPLEX\11LINK_UP\12LINK_AUTO\13LINK_PAUSE");
#endif
@@ -683,6 +723,38 @@ arswitch_getport(device_t dev, etherswitch_port_t *p)
} else {
return (ENXIO);
}
+
+ if (!arswitch_is_cpuport(sc, p->es_port) &&
+ AR8X16_IS_SWITCH(sc, AR8327)) {
+ int led;
+ p->es_nleds = 3;
+
+ for (led = 0; led < p->es_nleds; led++) {
+ int style;
+ uint32_t val;
+
+ /* Find the right style enum for our pattern */
+ val = arswitch_readreg(dev,
+ ar8327_led_mapping[p->es_port - 1][led].reg);
+ val = (val >> ar8327_led_mapping[p->es_port - 1][led].shift) & 0x03;
+
+ for (style = 0; style < ETHERSWITCH_PORT_LED_MAX; style++) {
+ if (led_pattern_table[style] == val)
+ break;
+ }
+
+ /* can't happen */
+ if (style == ETHERSWITCH_PORT_LED_MAX)
+ style = ETHERSWITCH_PORT_LED_DEFAULT;
+
+ p->es_led[led] = style;
+ }
+ } else {
+ p->es_nleds = 0;
+ }
+
return (0);
}
@@ -727,7 +799,7 @@ ar8xxx_port_vlan_setup(struct arswitch_softc *sc, etherswitch_port_t *p)
static int
arswitch_setport(device_t dev, etherswitch_port_t *p)
{
- int err;
+ int err, i;
struct arswitch_softc *sc;
struct ifmedia *ifm;
struct mii_data *mii;
@@ -744,9 +816,20 @@ arswitch_setport(device_t dev, etherswitch_port_t *p)
return (err);
}
- /* Do not allow media changes on CPU port. */
+ /* Do not allow media or led changes on CPU port. */
if (arswitch_is_cpuport(sc, p->es_port))
return (0);
+
+ if (AR8X16_IS_SWITCH(sc, AR8327)) {
+ for (i = 0; i < 3; i++) {
+ err = arswitch_setled(sc, p->es_port - 1, i, p->es_led[i]);
+ if (err)
+ return (err);
+ }
+ }
mii = arswitch_miiforport(sc, p->es_port);
if (mii == NULL)
@@ -758,11 +841,29 @@ arswitch_setport(device_t dev, etherswitch_port_t *p)
return (ifmedia_ioctl(ifp, &p->es_ifr, ifm, SIOCSIFMEDIA));
}
+static int
+arswitch_setled(struct arswitch_softc *sc, int phy, int led, int style)
+{
+ int shift;
+
+ if (phy < 0 || phy > sc->numphys)
+ return (EINVAL);
+
+ if (style < 0 || style > ETHERSWITCH_PORT_LED_MAX)
+ return (EINVAL);
+
+ shift = ar8327_led_mapping[phy][led].shift;
+ return (arswitch_modifyreg(sc->sc_dev,
+ ar8327_led_mapping[phy][led].reg,
+ 0x03 << shift, led_pattern_table[style] << shift));
+}
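/*
 * arswitch_setled() above is a 2-bit read-modify-write at a per-LED
 * shift; arswitch_modifyreg() performs the same clear-then-set on the
 * hardware register.  Standalone sketch of the field update.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t
set_led_field(uint32_t reg, int shift, uint32_t code)
{
	reg &= ~(0x3u << shift);	/* clear the 2-bit LED field */
	reg |= (code & 0x3u) << shift;	/* install the new pattern */
	return (reg);
}

int
main(void)
{
	/* e.g. force pattern 0x2 ("on") into the field at bit 14 */
	printf("0x%08x\n", set_led_field(0xffffffffu, 14, 0x2));
	return (0);
}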
+
static void
arswitch_statchg(device_t dev)
{
+ struct arswitch_softc *sc = device_get_softc(dev);
- DPRINTF(dev, "%s\n", __func__);
+ DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__);
}
static int
@@ -783,7 +884,7 @@ arswitch_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
struct arswitch_softc *sc = ifp->if_softc;
struct mii_data *mii = arswitch_miiforport(sc, ifp->if_dunit);
- DPRINTF(sc->sc_dev, "%s\n", __func__);
+ DPRINTF(sc, ARSWITCH_DBG_POLL, "%s\n", __func__);
if (mii == NULL)
return;
diff --git a/sys/dev/etherswitch/arswitch/arswitch_7240.c b/sys/dev/etherswitch/arswitch/arswitch_7240.c
index 3d2543a7017f..01ea9b3e9c1b 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_7240.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_7240.c
@@ -97,6 +97,8 @@ ar7240_hw_global_setup(struct arswitch_softc *sc)
AR7240_GLOBAL_CTRL_MTU_MASK,
SM(1536, AR7240_GLOBAL_CTRL_MTU_MASK));
+ /* XXX ARP? Frame Age enable? */
+
/* Service Tag */
arswitch_modifyreg(sc->sc_dev, AR8X16_REG_SERVICE_TAG,
AR8X16_SERVICE_TAG_MASK, 0);
diff --git a/sys/dev/etherswitch/arswitch/arswitch_8316.c b/sys/dev/etherswitch/arswitch/arswitch_8316.c
index d646e182f80c..54d6618bd805 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_8316.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_8316.c
@@ -137,6 +137,7 @@ ar8316_hw_global_setup(struct arswitch_softc *sc)
arswitch_writereg(sc->sc_dev, AR8X16_REG_TAG_PRIO, 0xfa50);
/* Enable ARP frame acknowledge. */
+ /* XXX TODO: aging? */
arswitch_modifyreg(sc->sc_dev, AR8X16_REG_AT_CTRL, 0,
AR8X16_AT_CTRL_ARP_EN);
diff --git a/sys/dev/etherswitch/arswitch/arswitch_8327.c b/sys/dev/etherswitch/arswitch/arswitch_8327.c
index 92e44fc9acb4..013688f1cce4 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_8327.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_8327.c
@@ -75,6 +75,36 @@
* lead to traffic storms/loops.
*/
+/* Map port+led to register+shift */
+struct ar8327_led_mapping ar8327_led_mapping[AR8327_NUM_PHYS][ETHERSWITCH_PORT_MAX_LEDS] =
+{
+ { /* PHY0 */
+ {AR8327_REG_LED_CTRL0, 14 },
+ {AR8327_REG_LED_CTRL1, 14 },
+ {AR8327_REG_LED_CTRL2, 14 }
+ },
+ { /* PHY1 */
+ {AR8327_REG_LED_CTRL3, 8 },
+ {AR8327_REG_LED_CTRL3, 10 },
+ {AR8327_REG_LED_CTRL3, 12 }
+ },
+ { /* PHY2 */
+ {AR8327_REG_LED_CTRL3, 14 },
+ {AR8327_REG_LED_CTRL3, 16 },
+ {AR8327_REG_LED_CTRL3, 18 }
+ },
+ { /* PHY3 */
+ {AR8327_REG_LED_CTRL3, 20 },
+ {AR8327_REG_LED_CTRL3, 22 },
+ {AR8327_REG_LED_CTRL3, 24 }
+ },
+ { /* PHY4 */
+ {AR8327_REG_LED_CTRL0, 30 },
+ {AR8327_REG_LED_CTRL1, 30 },
+ {AR8327_REG_LED_CTRL2, 30 }
+ }
+};
+
static int
ar8327_vlan_op(struct arswitch_softc *sc, uint32_t op, uint32_t vid,
uint32_t data)
@@ -1056,7 +1086,7 @@ ar8327_get_dot1q_vlan(struct arswitch_softc *sc, uint32_t *ports,
}
reg = arswitch_readreg(sc->sc_dev, AR8327_REG_VTU_FUNC0);
- DPRINTF(sc->sc_dev, "%s: %d: reg=0x%08x\n", __func__, vid, reg);
+ DPRINTF(sc, ARSWITCH_DBG_REGIO, "%s: %d: reg=0x%08x\n", __func__, vid, reg);
/*
* If any of the bits are set, update the port mask.
@@ -1088,7 +1118,7 @@ ar8327_set_dot1q_vlan(struct arswitch_softc *sc, uint32_t ports,
op = AR8327_VTU_FUNC1_OP_LOAD;
vid &= 0xfff;
- DPRINTF(sc->sc_dev,
+ DPRINTF(sc, ARSWITCH_DBG_VLAN,
"%s: vid: %d, ports=0x%08x, untagged_ports=0x%08x\n",
__func__,
vid,
diff --git a/sys/dev/etherswitch/arswitch/arswitch_8327.h b/sys/dev/etherswitch/arswitch/arswitch_8327.h
index 1f35d96639d1..e2a5f5ec7fdd 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_8327.h
+++ b/sys/dev/etherswitch/arswitch/arswitch_8327.h
@@ -85,6 +85,11 @@ struct ar8327_port_cfg {
uint32_t rxpause;
};
+extern struct ar8327_led_mapping {
+ int reg;
+ int shift;
+} ar8327_led_mapping[AR8327_NUM_PHYS][ETHERSWITCH_PORT_MAX_LEDS];
+
extern void ar8327_attach(struct arswitch_softc *sc);
#endif /* __ARSWITCH_8327_H__ */
diff --git a/sys/dev/etherswitch/arswitch/arswitch_phy.c b/sys/dev/etherswitch/arswitch/arswitch_phy.c
index ceeb307efb6d..cb9d0a0a40b5 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_phy.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_phy.c
@@ -62,10 +62,6 @@
#include "miibus_if.h"
#include "etherswitch_if.h"
-#if defined(DEBUG)
-static SYSCTL_NODE(_debug, OID_AUTO, arswitch, CTLFLAG_RD, 0, "arswitch");
-#endif
-
/*
* Access PHYs integrated into the switch by going direct
* to the PHY space itself, rather than through the switch
@@ -81,6 +77,9 @@ arswitch_readphy_external(device_t dev, int phy, int reg)
ARSWITCH_LOCK(sc);
ret = (MDIO_READREG(device_get_parent(dev), phy, reg));
+ DPRINTF(sc, ARSWITCH_DBG_PHYIO,
+ "%s: phy=0x%08x, reg=0x%08x, ret=0x%08x\n",
+ __func__, phy, reg, ret);
ARSWITCH_UNLOCK(sc);
return (ret);
@@ -96,6 +95,9 @@ arswitch_writephy_external(device_t dev, int phy, int reg, int data)
ARSWITCH_LOCK(sc);
(void) MDIO_WRITEREG(device_get_parent(dev), phy,
reg, data);
+ DPRINTF(sc, ARSWITCH_DBG_PHYIO,
+ "%s: phy=0x%08x, reg=0x%08x, data=0x%08x\n",
+ __func__, phy, reg, data);
ARSWITCH_UNLOCK(sc);
return (0);
@@ -141,7 +143,9 @@ arswitch_readphy_internal(device_t dev, int phy, int reg)
break;
}
if (timeout < 0) {
- DPRINTF(dev, "arswitch_readphy(): phy=%d.%02x; timeout=%d\n", phy, reg, timeout);
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "arswitch_readphy(): phy=%d.%02x; timeout=%d\n",
+ phy, reg, timeout);
goto fail;
}
data = arswitch_readreg_lsb(dev, a) &
diff --git a/sys/dev/etherswitch/arswitch/arswitch_reg.c b/sys/dev/etherswitch/arswitch/arswitch_reg.c
index 420e51644132..fb6baeb95441 100644
--- a/sys/dev/etherswitch/arswitch/arswitch_reg.c
+++ b/sys/dev/etherswitch/arswitch/arswitch_reg.c
@@ -232,6 +232,9 @@ arswitch_modifyreg(device_t dev, int addr, int mask, int set)
int value;
uint16_t phy, reg;
+ ARSWITCH_LOCK_ASSERT((struct arswitch_softc *)device_get_softc(dev),
+ MA_OWNED);
+
arswitch_split_setpage(dev, addr, &phy, &reg);
value = arswitch_reg_read32(dev, 0x10 | phy, reg);
@@ -243,9 +246,12 @@ arswitch_modifyreg(device_t dev, int addr, int mask, int set)
int
arswitch_waitreg(device_t dev, int addr, int mask, int val, int timeout)
{
+ struct arswitch_softc *sc = device_get_softc(dev);
int err, v;
uint16_t phy, reg;
+ ARSWITCH_LOCK_ASSERT(sc, MA_OWNED);
+
arswitch_split_setpage(dev, addr, &phy, &reg);
err = -1;
@@ -261,5 +267,10 @@ arswitch_waitreg(device_t dev, int addr, int mask, int val, int timeout)
DELAY(1);
timeout--;
}
+ if (err != 0) {
+ DPRINTF(sc, ARSWITCH_DBG_ANY,
+ "%s: waitreg failed; addr=0x%08x, mask=0x%08x, val=0x%08x\n",
+ __func__, addr, mask, val);
+ }
return (err);
}
diff --git a/sys/dev/etherswitch/arswitch/arswitchvar.h b/sys/dev/etherswitch/arswitch/arswitchvar.h
index a322a4fa9aac..19731f3b9413 100644
--- a/sys/dev/etherswitch/arswitch/arswitchvar.h
+++ b/sys/dev/etherswitch/arswitch/arswitchvar.h
@@ -48,6 +48,15 @@ typedef enum {
#define ARSWITCH_NUM_PORTS MAX(AR8327_NUM_PORTS, AR8X16_NUM_PORTS)
#define ARSWITCH_NUM_PHYS MAX(AR8327_NUM_PHYS, AR8X16_NUM_PHYS)
+#define ARSWITCH_NUM_LEDS 3
+
+struct arswitch_dev_led {
+ struct arswitch_softc *sc;
+ struct cdev *led;
+ int phy;
+ int lednum;
+};
+
struct arswitch_softc {
struct mtx sc_mtx; /* serialize access to softc */
device_t sc_dev;
@@ -66,9 +75,12 @@ struct arswitch_softc {
char *ifname[ARSWITCH_NUM_PHYS];
device_t miibus[ARSWITCH_NUM_PHYS];
struct ifnet *ifp[ARSWITCH_NUM_PHYS];
+ struct arswitch_dev_led dev_led[ARSWITCH_NUM_PHYS][ARSWITCH_NUM_LEDS];
struct callout callout_tick;
etherswitch_info_t info;
+ uint32_t sc_debug;
+
/* VLANs support */
int vid[AR8X16_MAX_VLANS];
uint32_t vlan_mode;
@@ -132,18 +144,27 @@ struct arswitch_softc {
#define ARSWITCH_TRYLOCK(_sc) \
mtx_trylock(&(_sc)->sc_mtx)
-#if defined(DEBUG)
-#define DPRINTF(dev, args...) device_printf(dev, args)
+#define ARSWITCH_DBG_RESET 0x00000001
+#define ARSWITCH_DBG_REGIO 0x00000002
+#define ARSWITCH_DBG_PHYIO 0x00000004
+#define ARSWITCH_DBG_POLL 0x00000008
+#define ARSWITCH_DBG_VLAN 0x00000010
+#define ARSWITCH_DBG_ANY 0xffffffff
+
+#if 1
+#define DPRINTF(sc, dbg, args...) \
+ do { \
+ if (((sc)->sc_debug & (dbg)) || \
+ ((sc)->sc_debug == ARSWITCH_DBG_ANY)) { \
+ device_printf((sc)->sc_dev, args); \
+ } \
+ } while (0)
#define DEVERR(dev, err, fmt, args...) do { \
if (err != 0) device_printf(dev, fmt, err, args); \
} while (0)
-#define DEBUG_INCRVAR(var) do { \
- var++; \
- } while (0)
#else
-#define DPRINTF(dev, args...)
+#define DPRINTF(sc, dbg, args...)
#define DEVERR(dev, err, fmt, args...)
-#define DEBUG_INCRVAR(var)
#endif
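/*
 * A userspace analog of the mask-gated DPRINTF above: each category is
 * one bit of the sysctl-settable mask, and ARSWITCH_DBG_ANY (all bits)
 * passes everything.  Names and mask value here are illustrative.
 */
#include <stdio.h>

#define DBG_VLAN	0x00000010
#define DBG_ANY		0xffffffff

static unsigned int debug_mask = DBG_VLAN;

#define DPRINTF(dbg, ...) do {					\
	if ((debug_mask & (dbg)) != 0)				\
		fprintf(stderr, __VA_ARGS__);			\
} while (0)

int
main(void)
{
	DPRINTF(DBG_VLAN, "vid %d ports 0x%08x\n", 5, 0x1f);
	DPRINTF(0x1, "suppressed\n");	/* category not enabled */
	return (0);
}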
#endif /* __ARSWITCHVAR_H__ */
diff --git a/sys/dev/etherswitch/etherswitch.h b/sys/dev/etherswitch/etherswitch.h
index 26190198b529..0076177ebf99 100644
--- a/sys/dev/etherswitch/etherswitch.h
+++ b/sys/dev/etherswitch/etherswitch.h
@@ -14,7 +14,7 @@ extern driver_t etherswitch_driver;
struct etherswitch_reg {
uint16_t reg;
- uint16_t val;
+ uint32_t val;
};
typedef struct etherswitch_reg etherswitch_reg_t;
@@ -64,10 +64,23 @@ typedef struct etherswitch_conf etherswitch_conf_t;
#define ETHERSWITCH_PORT_FLAGS_BITS \
"\020\1CPUPORT\2STRIPTAG\3ADDTAG\4FIRSTLOCK\5DROPUNTAGGED\6QinQ\7INGRESS"
+#define ETHERSWITCH_PORT_MAX_LEDS 3
+
+enum etherswitch_port_led {
+ ETHERSWITCH_PORT_LED_DEFAULT,
+ ETHERSWITCH_PORT_LED_ON,
+ ETHERSWITCH_PORT_LED_OFF,
+ ETHERSWITCH_PORT_LED_BLINK,
+ ETHERSWITCH_PORT_LED_MAX
+};
+typedef enum etherswitch_port_led etherswitch_port_led_t;
+
struct etherswitch_port {
int es_port;
int es_pvid;
+ int es_nleds;
uint32_t es_flags;
+ etherswitch_port_led_t es_led[ETHERSWITCH_PORT_MAX_LEDS];
union {
struct ifreq es_uifr;
struct ifmediareq es_uifmr;
diff --git a/sys/dev/etherswitch/ip17x/ip17x.c b/sys/dev/etherswitch/ip17x/ip17x.c
index 43cdddb27ff2..e39663498679 100644
--- a/sys/dev/etherswitch/ip17x/ip17x.c
+++ b/sys/dev/etherswitch/ip17x/ip17x.c
@@ -84,7 +84,7 @@ ip17x_probe(device_t dev)
phy_id1 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR1);
phy_id2 = MDIO_READREG(device_get_parent(dev), 0, MII_PHYIDR2);
- oui = MII_OUI(phy_id1, phy_id2),
+ oui = MII_OUI(phy_id1, phy_id2);
model = MII_MODEL(phy_id2);
/* We only care about IC+ devices. */
if (oui != IP17X_OUI) {
diff --git a/sys/dev/fdt/fdt_common.c b/sys/dev/fdt/fdt_common.c
index 4e0d6e206c5b..2f4555f81a4d 100644
--- a/sys/dev/fdt/fdt_common.c
+++ b/sys/dev/fdt/fdt_common.c
@@ -38,6 +38,7 @@ __FBSDID("$FreeBSD$");
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/limits.h>
+#include <sys/sysctl.h>
#include <machine/resource.h>
@@ -60,6 +61,8 @@ __FBSDID("$FreeBSD$");
#define FDT_REG_CELLS 4
+SYSCTL_NODE(_hw, OID_AUTO, fdt, CTLFLAG_RD, 0, "Flattened Device Tree");
+
vm_paddr_t fdt_immr_pa;
vm_offset_t fdt_immr_va;
vm_offset_t fdt_immr_size;
diff --git a/sys/dev/fdt/fdt_common.h b/sys/dev/fdt/fdt_common.h
index 94f84ff3fe49..afddc310c127 100644
--- a/sys/dev/fdt/fdt_common.h
+++ b/sys/dev/fdt/fdt_common.h
@@ -32,6 +32,7 @@
#ifndef _FDT_COMMON_H_
#define _FDT_COMMON_H_
+#include <sys/sysctl.h>
#include <sys/slicer.h>
#include <contrib/libfdt/libfdt_env.h>
#include <dev/ofw/ofw_bus.h>
@@ -80,6 +81,8 @@ extern struct fdt_pm_mask_entry fdt_pm_mask_table[];
extern u_char fdt_static_dtb;
#endif
+SYSCTL_DECL(_hw_fdt);
+
int fdt_addrsize_cells(phandle_t, int *, int *);
u_long fdt_data_get(void *, int);
int fdt_data_to_res(pcell_t *, int, int, u_long *, u_long *);
diff --git a/sys/dev/filemon/filemon.c b/sys/dev/filemon/filemon.c
index 919af9d6dbe6..26e1bc38d071 100644
--- a/sys/dev/filemon/filemon.c
+++ b/sys/dev/filemon/filemon.c
@@ -137,6 +137,8 @@ filemon_proc_get(struct proc *p)
{
struct filemon *filemon;
+ if (p->p_filemon == NULL)
+ return (NULL);
PROC_LOCK(p);
filemon = filemon_acquire(p->p_filemon);
PROC_UNLOCK(p);
diff --git a/sys/dev/gpio/gpioled.c b/sys/dev/gpio/gpioled.c
index bd77f5386d53..2c579e404bc3 100644
--- a/sys/dev/gpio/gpioled.c
+++ b/sys/dev/gpio/gpioled.c
@@ -66,6 +66,7 @@ struct gpioled_softc
device_t sc_busdev;
struct mtx sc_mtx;
struct cdev *sc_leddev;
+ int sc_invert;
};
static void gpioled_control(void *, int);
@@ -82,6 +83,8 @@ gpioled_control(void *priv, int onoff)
GPIOLED_LOCK(sc);
if (GPIOBUS_PIN_SETFLAGS(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
GPIO_PIN_OUTPUT) == 0) {
+ if (sc->sc_invert)
+ onoff = !onoff;
GPIOBUS_PIN_SET(sc->sc_busdev, sc->sc_dev, GPIOLED_PIN,
onoff ? GPIO_PIN_HIGH : GPIO_PIN_LOW);
}
@@ -199,6 +202,8 @@ gpioled_attach(device_t dev)
if (resource_string_value(device_get_name(dev),
device_get_unit(dev), "name", &name))
name = NULL;
+ resource_int_value(device_get_name(dev),
+ device_get_unit(dev), "invert", &sc->sc_invert);
#endif
sc->sc_leddev = led_create_state(gpioled_control, sc, name ? name :
diff --git a/sys/dev/hwpmc/hwpmc_core.c b/sys/dev/hwpmc/hwpmc_core.c
index a54ac27cc391..c734c5ad3c1a 100644
--- a/sys/dev/hwpmc/hwpmc_core.c
+++ b/sys/dev/hwpmc/hwpmc_core.c
@@ -365,7 +365,7 @@ iaf_read_pmc(int cpu, int ri, pmc_value_t *v)
if (PMC_IS_SAMPLING_MODE(PMC_TO_MODE(pm)))
*v = iaf_perfctr_value_to_reload_count(tmp);
else
- *v = tmp;
+ *v = tmp & ((1ULL << core_iaf_width) - 1);
PMCDBG4(MDP,REA,1, "iaf-read cpu=%d ri=%d msr=0x%x -> v=%jx", cpu, ri,
IAF_RI_TO_MSR(ri), *v);
diff --git a/sys/dev/hyperv/include/hyperv.h b/sys/dev/hyperv/include/hyperv.h
index a502a648914f..f35685645935 100644
--- a/sys/dev/hyperv/include/hyperv.h
+++ b/sys/dev/hyperv/include/hyperv.h
@@ -28,43 +28,21 @@
* $FreeBSD$
*/
-/**
- * HyperV definitions for messages that are sent between instances of the
- * Channel Management Library in separate partitions, or in some cases,
- * back to itself.
- */
-
-#ifndef __HYPERV_H__
-#define __HYPERV_H__
+#ifndef _HYPERV_H_
+#define _HYPERV_H_
#include <sys/param.h>
-#include <sys/mbuf.h>
-#include <sys/queue.h>
-#include <sys/malloc.h>
-#include <sys/kthread.h>
-#include <sys/taskqueue.h>
-#include <sys/systm.h>
-#include <sys/lock.h>
-#include <sys/sema.h>
-#include <sys/smp.h>
-#include <sys/mutex.h>
-#include <sys/bus.h>
-#include <sys/sysctl.h>
+
#include <vm/vm.h>
-#include <vm/vm_param.h>
#include <vm/pmap.h>
-#include <amd64/include/xen/synch_bitops.h>
-#include <amd64/include/atomic.h>
-#include <dev/hyperv/include/hyperv_busdma.h>
-
struct hyperv_guid {
uint8_t hv_guid[16];
} __packed;
#define HYPERV_GUID_STRLEN 40
-int hyperv_guid2str(const struct hyperv_guid *, char *, size_t);
+int hyperv_guid2str(const struct hyperv_guid *, char *, size_t);
/**
* @brief Get physical address from virtual
@@ -77,4 +55,4 @@ hv_get_phys_addr(void *virt)
return (ret);
}
-#endif /* __HYPERV_H__ */
+#endif /* _HYPERV_H_ */
diff --git a/sys/dev/hyperv/include/hyperv_busdma.h b/sys/dev/hyperv/include/hyperv_busdma.h
index a27d2dba7bf4..ff01b3e27a95 100644
--- a/sys/dev/hyperv/include/hyperv_busdma.h
+++ b/sys/dev/hyperv/include/hyperv_busdma.h
@@ -39,11 +39,11 @@ struct hyperv_dma {
bus_dmamap_t hv_dmap;
};
-void hyperv_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg,
- int error);
-void *hyperv_dmamem_alloc(bus_dma_tag_t parent_dtag, bus_size_t alignment,
- bus_addr_t boundary, bus_size_t size, struct hyperv_dma *dma,
- int flags);
-void hyperv_dmamem_free(struct hyperv_dma *dma, void *ptr);
+void hyperv_dma_map_paddr(void *arg, bus_dma_segment_t *segs,
+ int nseg, int error);
+void *hyperv_dmamem_alloc(bus_dma_tag_t parent_dtag,
+ bus_size_t alignment, bus_addr_t boundary, bus_size_t size,
+ struct hyperv_dma *dma, int flags);
+void hyperv_dmamem_free(struct hyperv_dma *dma, void *ptr);
#endif /* !_HYPERV_BUSDMA_H_ */
diff --git a/sys/dev/hyperv/include/vmbus.h b/sys/dev/hyperv/include/vmbus.h
index 7c06580dcf4d..5843f0345e0a 100644
--- a/sys/dev/hyperv/include/vmbus.h
+++ b/sys/dev/hyperv/include/vmbus.h
@@ -30,6 +30,7 @@
#define _VMBUS_H_
#include <sys/param.h>
+#include <sys/bus.h>
/*
* VMBUS version is 32 bit, upper 16 bit for major_number and lower
@@ -82,12 +83,18 @@ struct vmbus_chanpkt_hdr {
#define VMBUS_CHANPKT_TYPE_GPA 0x0009
#define VMBUS_CHANPKT_TYPE_COMP 0x000b
+#define VMBUS_CHANPKT_FLAG_NONE 0
#define VMBUS_CHANPKT_FLAG_RC 0x0001 /* report completion */
#define VMBUS_CHANPKT_CONST_DATA(pkt) \
(const void *)((const uint8_t *)(pkt) + \
VMBUS_CHANPKT_GETLEN((pkt)->cph_hlen))
+/* Include padding */
+#define VMBUS_CHANPKT_DATALEN(pkt) \
+ (VMBUS_CHANPKT_GETLEN((pkt)->cph_tlen) -\
+ VMBUS_CHANPKT_GETLEN((pkt)->cph_hlen))
+
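/*
 * A sketch of the VMBUS_CHANPKT_DATALEN() math above, assuming
 * VMBUS_CHANPKT_GETLEN() scales the stored lengths by 8 (packet
 * lengths on the ring are kept in 8-byte units); the field values are
 * made up.
 */
#include <stdint.h>
#include <stdio.h>

#define GETLEN(l)	((int)(l) << 3)	/* assumed 8-byte units */

int
main(void)
{
	uint16_t tlen = 0x40, hlen = 0x10;	/* hypothetical packet */

	printf("data+padding = %d bytes\n", GETLEN(tlen) - GETLEN(hlen));
	return (0);
}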
struct vmbus_rxbuf_desc {
uint32_t rb_len;
uint32_t rb_ofs;
@@ -101,9 +108,6 @@ struct vmbus_chanpkt_rxbuf {
struct vmbus_rxbuf_desc cp_rxbuf[];
} __packed;
-#define VMBUS_CHAN_SGLIST_MAX 32
-#define VMBUS_CHAN_PRPLIST_MAX 32
-
struct vmbus_channel;
struct hyperv_guid;
@@ -115,45 +119,47 @@ vmbus_get_channel(device_t dev)
return device_get_ivars(dev);
}
-int vmbus_chan_open(struct vmbus_channel *chan,
- int txbr_size, int rxbr_size, const void *udata, int udlen,
- vmbus_chan_callback_t cb, void *cbarg);
-void vmbus_chan_close(struct vmbus_channel *chan);
+int vmbus_chan_open(struct vmbus_channel *chan,
+ int txbr_size, int rxbr_size, const void *udata, int udlen,
+ vmbus_chan_callback_t cb, void *cbarg);
+void vmbus_chan_close(struct vmbus_channel *chan);
-int vmbus_chan_gpadl_connect(struct vmbus_channel *chan,
- bus_addr_t paddr, int size, uint32_t *gpadl);
-int vmbus_chan_gpadl_disconnect(struct vmbus_channel *chan,
- uint32_t gpadl);
+int vmbus_chan_gpadl_connect(struct vmbus_channel *chan,
+ bus_addr_t paddr, int size, uint32_t *gpadl);
+int vmbus_chan_gpadl_disconnect(struct vmbus_channel *chan,
+ uint32_t gpadl);
-void vmbus_chan_cpu_set(struct vmbus_channel *chan, int cpu);
-void vmbus_chan_cpu_rr(struct vmbus_channel *chan);
+void vmbus_chan_cpu_set(struct vmbus_channel *chan, int cpu);
+void vmbus_chan_cpu_rr(struct vmbus_channel *chan);
struct vmbus_channel *
- vmbus_chan_cpu2chan(struct vmbus_channel *chan, int cpu);
-void vmbus_chan_set_readbatch(struct vmbus_channel *chan, bool on);
+ vmbus_chan_cpu2chan(struct vmbus_channel *chan, int cpu);
+void vmbus_chan_set_readbatch(struct vmbus_channel *chan, bool on);
struct vmbus_channel **
- vmbus_subchan_get(struct vmbus_channel *pri_chan, int subchan_cnt);
-void vmbus_subchan_rel(struct vmbus_channel **subchan, int subchan_cnt);
-void vmbus_subchan_drain(struct vmbus_channel *pri_chan);
-
-int vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen,
- uint64_t *xactid);
-int vmbus_chan_recv_pkt(struct vmbus_channel *chan,
- struct vmbus_chanpkt_hdr *pkt, int *pktlen);
-
-int vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
- uint16_t flags, void *data, int dlen, uint64_t xactid);
-int vmbus_chan_send_sglist(struct vmbus_channel *chan,
- struct vmbus_gpa sg[], int sglen, void *data, int dlen,
- uint64_t xactid);
-int vmbus_chan_send_prplist(struct vmbus_channel *chan,
- struct vmbus_gpa_range *prp, int prp_cnt, void *data, int dlen,
- uint64_t xactid);
-
-uint32_t vmbus_chan_id(const struct vmbus_channel *chan);
-uint32_t vmbus_chan_subidx(const struct vmbus_channel *chan);
-bool vmbus_chan_is_primary(const struct vmbus_channel *chan);
+ vmbus_subchan_get(struct vmbus_channel *pri_chan,
+ int subchan_cnt);
+void vmbus_subchan_rel(struct vmbus_channel **subchan,
+ int subchan_cnt);
+void vmbus_subchan_drain(struct vmbus_channel *pri_chan);
+
+int vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen,
+ uint64_t *xactid);
+int vmbus_chan_recv_pkt(struct vmbus_channel *chan,
+ struct vmbus_chanpkt_hdr *pkt, int *pktlen);
+
+int vmbus_chan_send(struct vmbus_channel *chan, uint16_t type,
+ uint16_t flags, void *data, int dlen, uint64_t xactid);
+int vmbus_chan_send_sglist(struct vmbus_channel *chan,
+ struct vmbus_gpa sg[], int sglen, void *data, int dlen,
+ uint64_t xactid);
+int vmbus_chan_send_prplist(struct vmbus_channel *chan,
+ struct vmbus_gpa_range *prp, int prp_cnt, void *data,
+ int dlen, uint64_t xactid);
+
+uint32_t vmbus_chan_id(const struct vmbus_channel *chan);
+uint32_t vmbus_chan_subidx(const struct vmbus_channel *chan);
+bool vmbus_chan_is_primary(const struct vmbus_channel *chan);
const struct hyperv_guid *
- vmbus_chan_guid_inst(const struct vmbus_channel *chan);
+ vmbus_chan_guid_inst(const struct vmbus_channel *chan);
#endif /* !_VMBUS_H_ */
diff --git a/sys/dev/hyperv/include/vmbus_xact.h b/sys/dev/hyperv/include/vmbus_xact.h
new file mode 100644
index 000000000000..c2919aa8c1f2
--- /dev/null
+++ b/sys/dev/hyperv/include/vmbus_xact.h
@@ -0,0 +1,59 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VMBUS_XACT_H_
+#define _VMBUS_XACT_H_
+
+#include <sys/param.h>
+#include <sys/bus.h>
+
+struct vmbus_xact;
+struct vmbus_xact_ctx;
+
+struct vmbus_xact_ctx *vmbus_xact_ctx_create(bus_dma_tag_t dtag,
+ size_t req_size, size_t resp_size,
+ size_t priv_size);
+void vmbus_xact_ctx_destroy(struct vmbus_xact_ctx *ctx);
+struct vmbus_xact *vmbus_xact_get(struct vmbus_xact_ctx *ctx,
+ size_t req_len);
+void vmbus_xact_put(struct vmbus_xact *xact);
+
+void *vmbus_xact_req_data(const struct vmbus_xact *xact);
+bus_addr_t vmbus_xact_req_paddr(const struct vmbus_xact *xact);
+void *vmbus_xact_priv(const struct vmbus_xact *xact,
+ size_t priv_len);
+void vmbus_xact_activate(struct vmbus_xact *xact);
+void vmbus_xact_deactivate(struct vmbus_xact *xact);
+const void *vmbus_xact_wait(struct vmbus_xact *xact,
+ size_t *resp_len);
+void vmbus_xact_wakeup(struct vmbus_xact *xact,
+ const void *data, size_t dlen);
+void vmbus_xact_ctx_wakeup(struct vmbus_xact_ctx *ctx,
+ const void *data, size_t dlen);
+
+#endif /* !_VMBUS_XACT_H_ */
diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.c b/sys/dev/hyperv/netvsc/hv_net_vsc.c
index eb8a391480d3..38242ceba10f 100644
--- a/sys/dev/hyperv/netvsc/hv_net_vsc.c
+++ b/sys/dev/hyperv/netvsc/hv_net_vsc.c
@@ -45,9 +45,11 @@
#include <machine/atomic.h>
#include <dev/hyperv/include/hyperv.h>
-#include "hv_net_vsc.h"
-#include "hv_rndis.h"
-#include "hv_rndis_filter.h"
+#include <dev/hyperv/include/vmbus_xact.h>
+#include <dev/hyperv/netvsc/hv_net_vsc.h>
+#include <dev/hyperv/netvsc/hv_rndis.h>
+#include <dev/hyperv/netvsc/hv_rndis_filter.h>
+#include <dev/hyperv/netvsc/if_hnreg.h>
MALLOC_DEFINE(M_NETVSC, "netvsc", "Hyper-V netvsc driver");
@@ -68,6 +70,12 @@ static void hv_nv_on_receive_completion(struct vmbus_channel *chan,
static void hv_nv_on_receive(netvsc_dev *net_dev,
struct hn_rx_ring *rxr, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt);
+static void hn_nvs_sent_none(struct hn_send_ctx *sndc,
+ struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
+ const struct nvsp_msg_ *msg, int);
+
+static struct hn_send_ctx hn_send_ctx_none =
+ HN_SEND_CTX_INITIALIZER(hn_nvs_sent_none, NULL);
/*
*
@@ -110,7 +118,7 @@ hv_nv_get_next_send_section(netvsc_dev *net_dev)
unsigned long bitsmap_words = net_dev->bitsmap_words;
unsigned long *bitsmap = net_dev->send_section_bitsmap;
unsigned long idx;
- int ret = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
+ int ret = HN_NVS_CHIM_IDX_INVALID;
int i;
for (i = 0; i < bitsmap_words; i++) {
@@ -141,9 +149,14 @@ hv_nv_get_next_send_section(netvsc_dev *net_dev)
static int
hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
{
+ struct vmbus_xact *xact;
+ struct hn_nvs_rxbuf_conn *conn;
+ const struct hn_nvs_rxbuf_connresp *resp;
+ size_t resp_len;
+ struct hn_send_ctx sndc;
netvsc_dev *net_dev;
- nvsp_msg *init_pkt;
- int ret = 0;
+ uint32_t status;
+ int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev) {
@@ -155,7 +168,7 @@ hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (net_dev->rx_buf == NULL) {
device_printf(sc->hn_dev, "allocate rxbuf failed\n");
- return ENOMEM;
+ return (ENOMEM);
}
/*
@@ -165,73 +178,75 @@ hv_nv_init_rx_buffer_with_net_vsp(struct hn_softc *sc)
* Only primary channel has RXBUF connected to it. Sub-channels
* just share this RXBUF.
*/
- ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
+ error = vmbus_chan_gpadl_connect(sc->hn_prichan,
net_dev->rxbuf_dma.hv_paddr, net_dev->rx_buf_size,
&net_dev->rx_buf_gpadl_handle);
- if (ret != 0) {
- device_printf(sc->hn_dev, "rxbuf gpadl connect failed: %d\n",
- ret);
+ if (error) {
+ if_printf(sc->hn_ifp, "rxbuf gpadl connect failed: %d\n",
+ error);
goto cleanup;
}
-
- /* sema_wait(&ext->channel_init_sema); KYS CHECK */
- /* Notify the NetVsp of the gpadl handle */
- init_pkt = &net_dev->channel_init_packet;
-
- memset(init_pkt, 0, sizeof(nvsp_msg));
-
- init_pkt->hdr.msg_type = nvsp_msg_1_type_send_rx_buf;
- init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
- net_dev->rx_buf_gpadl_handle;
- init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
- NETVSC_RECEIVE_BUFFER_ID;
-
- /* Send the gpadl notification request */
+ /*
+ * Connect RXBUF to NVS.
+ */
- ret = vmbus_chan_send(sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
- init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
- if (ret != 0) {
+ xact = vmbus_xact_get(sc->hn_xact, sizeof(*conn));
+ if (xact == NULL) {
+ if_printf(sc->hn_ifp, "no xact for nvs rxbuf conn\n");
+ error = ENXIO;
goto cleanup;
}
- sema_wait(&net_dev->channel_init_sema);
-
- /* Check the response */
- if (init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.status
- != nvsp_status_success) {
- ret = EINVAL;
+ conn = vmbus_xact_req_data(xact);
+ conn->nvs_type = HN_NVS_TYPE_RXBUF_CONN;
+ conn->nvs_gpadl = net_dev->rx_buf_gpadl_handle;
+ conn->nvs_sig = HN_NVS_RXBUF_SIG;
+
+ hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
+ vmbus_xact_activate(xact);
+
+ error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC,
+ conn, sizeof(*conn), &sndc);
+ if (error != 0) {
+ if_printf(sc->hn_ifp, "send nvs rxbuf conn failed: %d\n",
+ error);
+ vmbus_xact_deactivate(xact);
+ vmbus_xact_put(xact);
goto cleanup;
}
- net_dev->rx_section_count =
- init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.num_sections;
-
- net_dev->rx_sections = malloc(net_dev->rx_section_count *
- sizeof(nvsp_1_rx_buf_section), M_NETVSC, M_WAITOK);
- memcpy(net_dev->rx_sections,
- init_pkt->msgs.vers_1_msgs.send_rx_buf_complete.sections,
- net_dev->rx_section_count * sizeof(nvsp_1_rx_buf_section));
+ resp = vmbus_xact_wait(xact, &resp_len);
+ if (resp_len < sizeof(*resp)) {
+ if_printf(sc->hn_ifp, "invalid rxbuf conn resp length %zu\n",
+ resp_len);
+ vmbus_xact_put(xact);
+ error = EINVAL;
+ goto cleanup;
+ }
+ if (resp->nvs_type != HN_NVS_TYPE_RXBUF_CONNRESP) {
+ if_printf(sc->hn_ifp, "not rxbuf conn resp, type %u\n",
+ resp->nvs_type);
+ vmbus_xact_put(xact);
+ error = EINVAL;
+ goto cleanup;
+ }
+ status = resp->nvs_status;
+ vmbus_xact_put(xact);
- /*
- * For first release, there should only be 1 section that represents
- * the entire receive buffer
- */
- if (net_dev->rx_section_count != 1
- || net_dev->rx_sections->offset != 0) {
- ret = EINVAL;
+ if (status != HN_NVS_STATUS_OK) {
+ if_printf(sc->hn_ifp, "rxbuf conn failed: %x\n", status);
+ error = EIO;
goto cleanup;
}
+ net_dev->rx_section_count = 1;
- goto exit;
+ return (0);
cleanup:
hv_nv_destroy_rx_buffer(net_dev);
-
-exit:
- return (ret);
+ return (error);
}
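/*
 * A minimal sketch of the xact-based request/response idiom used
 * above, condensed into one place.  It assumes the same driver
 * context (sc->hn_xact, sc->hn_prichan); the concrete request type
 * and version constant are only examples.
 */
static int
hn_nvs_xact_sketch(struct hn_softc *sc)
{
	struct hn_send_ctx sndc;
	struct vmbus_xact *xact;
	struct hn_nvs_init *req;
	const struct hn_nvs_init_resp *resp;
	size_t resp_len;
	int error;

	xact = vmbus_xact_get(sc->hn_xact, sizeof(*req));
	if (xact == NULL)
		return (ENXIO);
	req = vmbus_xact_req_data(xact);	/* fill request in-place */
	req->nvs_type = HN_NVS_TYPE_INIT;
	req->nvs_ver_min = req->nvs_ver_max = NVSP_PROTOCOL_VERSION_1;

	hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
	vmbus_xact_activate(xact);		/* arm before sending */

	error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC,
	    req, sizeof(*req), &sndc);
	if (error) {
		vmbus_xact_deactivate(xact);
		vmbus_xact_put(xact);
		return (error);
	}
	resp = vmbus_xact_wait(xact, &resp_len);	/* sleep for reply */
	if (resp_len < sizeof(*resp) ||
	    resp->nvs_type != HN_NVS_TYPE_INIT_RESP ||
	    resp->nvs_status != HN_NVS_STATUS_OK) {
		vmbus_xact_put(xact);
		return (EINVAL);
	}
	vmbus_xact_put(xact);
	return (0);
}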
/*
@@ -240,9 +255,14 @@ exit:
static int
hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
{
+ struct hn_send_ctx sndc;
+ struct vmbus_xact *xact;
+ struct hn_nvs_chim_conn *chim;
+ const struct hn_nvs_chim_connresp *resp;
+ size_t resp_len;
+ uint32_t status, sectsz;
netvsc_dev *net_dev;
- nvsp_msg *init_pkt;
- int ret = 0;
+ int error;
net_dev = hv_nv_get_outbound_net_device(sc);
if (!net_dev) {
@@ -254,7 +274,7 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
BUS_DMA_WAITOK | BUS_DMA_ZERO);
if (net_dev->send_buf == NULL) {
device_printf(sc->hn_dev, "allocate chimney txbuf failed\n");
- return ENOMEM;
+ return (ENOMEM);
}
/*
@@ -264,47 +284,76 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
* Only primary channel has chimney sending buffer connected to it.
* Sub-channels just share this chimney sending buffer.
*/
- ret = vmbus_chan_gpadl_connect(sc->hn_prichan,
+ error = vmbus_chan_gpadl_connect(sc->hn_prichan,
net_dev->txbuf_dma.hv_paddr, net_dev->send_buf_size,
&net_dev->send_buf_gpadl_handle);
- if (ret != 0) {
- device_printf(sc->hn_dev, "chimney sending buffer gpadl "
- "connect failed: %d\n", ret);
+ if (error) {
+ if_printf(sc->hn_ifp, "chimney sending buffer gpadl "
+ "connect failed: %d\n", error);
goto cleanup;
}
- /* Notify the NetVsp of the gpadl handle */
-
- init_pkt = &net_dev->channel_init_packet;
-
- memset(init_pkt, 0, sizeof(nvsp_msg));
+ /*
+ * Connect chimney sending buffer to NVS
+ */
- init_pkt->hdr.msg_type = nvsp_msg_1_type_send_send_buf;
- init_pkt->msgs.vers_1_msgs.send_rx_buf.gpadl_handle =
- net_dev->send_buf_gpadl_handle;
- init_pkt->msgs.vers_1_msgs.send_rx_buf.id =
- NETVSC_SEND_BUFFER_ID;
+ xact = vmbus_xact_get(sc->hn_xact, sizeof(*chim));
+ if (xact == NULL) {
+ if_printf(sc->hn_ifp, "no xact for nvs chim conn\n");
+ error = ENXIO;
+ goto cleanup;
+ }
- /* Send the gpadl notification request */
+ chim = vmbus_xact_req_data(xact);
+ chim->nvs_type = HN_NVS_TYPE_CHIM_CONN;
+ chim->nvs_gpadl = net_dev->send_buf_gpadl_handle;
+ chim->nvs_sig = HN_NVS_CHIM_SIG;
+
+ hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
+ vmbus_xact_activate(xact);
+
+ error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC,
+ chim, sizeof(*chim), &sndc);
+ if (error) {
+ if_printf(sc->hn_ifp, "send nvs chim conn failed: %d\n",
+ error);
+ vmbus_xact_deactivate(xact);
+ vmbus_xact_put(xact);
+ goto cleanup;
+ }
- ret = vmbus_chan_send(sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
- init_pkt, sizeof(nvsp_msg), (uint64_t)init_pkt);
- if (ret != 0) {
+ resp = vmbus_xact_wait(xact, &resp_len);
+ if (resp_len < sizeof(*resp)) {
+ if_printf(sc->hn_ifp, "invalid chim conn resp length %zu\n",
+ resp_len);
+ vmbus_xact_put(xact);
+ error = EINVAL;
+ goto cleanup;
+ }
+ if (resp->nvs_type != HN_NVS_TYPE_CHIM_CONNRESP) {
+ if_printf(sc->hn_ifp, "not chim conn resp, type %u\n",
+ resp->nvs_type);
+ vmbus_xact_put(xact);
+ error = EINVAL;
goto cleanup;
}
- sema_wait(&net_dev->channel_init_sema);
+ status = resp->nvs_status;
+ sectsz = resp->nvs_sectsz;
+ vmbus_xact_put(xact);
- /* Check the response */
- if (init_pkt->msgs.vers_1_msgs.send_send_buf_complete.status
- != nvsp_status_success) {
- ret = EINVAL;
+ if (status != HN_NVS_STATUS_OK) {
+ if_printf(sc->hn_ifp, "chim conn failed: %x\n", status);
+ error = EIO;
goto cleanup;
}
+ if (sectsz == 0) {
+ if_printf(sc->hn_ifp, "zero chimney sending buffer "
+ "section size\n");
+ return (0);
+ }
- net_dev->send_section_size =
- init_pkt->msgs.vers_1_msgs.send_send_buf_complete.section_size;
+ net_dev->send_section_size = sectsz;
net_dev->send_section_count =
net_dev->send_buf_size / net_dev->send_section_size;
net_dev->bitsmap_words = howmany(net_dev->send_section_count,
@@ -313,13 +362,15 @@ hv_nv_init_send_buffer_with_net_vsp(struct hn_softc *sc)
malloc(net_dev->bitsmap_words * sizeof(long), M_NETVSC,
M_WAITOK | M_ZERO);
- goto exit;
+ if (bootverbose) {
+ if_printf(sc->hn_ifp, "chimney sending buffer %u/%u\n",
+ net_dev->send_section_size, net_dev->send_section_count);
+ }
+ return (0);
cleanup:
hv_nv_destroy_send_buffer(net_dev);
-
-exit:
- return (ret);
+ return (error);
}
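/*
 * Worked sizing example for the math above (values assumed for
 * illustration; the real section size comes from the host): with a
 * 1MB chimney sending buffer and sectsz = 6144,
 *
 *	send_section_count = 1048576 / 6144 = 170
 *	bitsmap_words      = howmany(170, BITS_PER_LONG) = 3 (LP64)
 */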
/*
@@ -328,35 +379,28 @@ exit:
static int
hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
{
- nvsp_msg *revoke_pkt;
int ret = 0;
- /*
- * If we got a section count, it means we received a
- * send_rx_buf_complete msg
- * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
- * we need to send a revoke msg here
- */
if (net_dev->rx_section_count) {
- /* Send the revoke receive buffer */
- revoke_pkt = &net_dev->revoke_packet;
- memset(revoke_pkt, 0, sizeof(nvsp_msg));
-
- revoke_pkt->hdr.msg_type = nvsp_msg_1_type_revoke_rx_buf;
- revoke_pkt->msgs.vers_1_msgs.revoke_rx_buf.id =
- NETVSC_RECEIVE_BUFFER_ID;
-
- ret = vmbus_chan_send(net_dev->sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, 0, revoke_pkt, sizeof(nvsp_msg),
- (uint64_t)(uintptr_t)revoke_pkt);
+ struct hn_nvs_rxbuf_disconn disconn;
/*
- * If we failed here, we might as well return and have a leak
- * rather than continue and a bugchk
+ * Disconnect RXBUF from NVS.
*/
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.nvs_type = HN_NVS_TYPE_RXBUF_DISCONN;
+ disconn.nvs_sig = HN_NVS_RXBUF_SIG;
+
+ /* NOTE: No response. */
+ ret = hn_nvs_send(net_dev->sc->hn_prichan,
+ VMBUS_CHANPKT_FLAG_NONE, &disconn, sizeof(disconn),
+ &hn_send_ctx_none);
if (ret != 0) {
+ if_printf(net_dev->sc->hn_ifp,
+ "send rxbuf disconn failed: %d\n", ret);
return (ret);
}
+ net_dev->rx_section_count = 0;
}
/* Tear down the gpadl on the vsp end */
@@ -379,12 +423,6 @@ hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
net_dev->rx_buf = NULL;
}
- if (net_dev->rx_sections) {
- free(net_dev->rx_sections, M_NETVSC);
- net_dev->rx_sections = NULL;
- net_dev->rx_section_count = 0;
- }
-
return (ret);
}
@@ -394,34 +432,25 @@ hv_nv_destroy_rx_buffer(netvsc_dev *net_dev)
static int
hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
{
- nvsp_msg *revoke_pkt;
int ret = 0;
- /*
- * If we got a section count, it means we received a
- * send_rx_buf_complete msg
- * (ie sent nvsp_msg_1_type_send_rx_buf msg) therefore,
- * we need to send a revoke msg here
- */
if (net_dev->send_section_size) {
- /* Send the revoke send buffer */
- revoke_pkt = &net_dev->revoke_packet;
- memset(revoke_pkt, 0, sizeof(nvsp_msg));
-
- revoke_pkt->hdr.msg_type =
- nvsp_msg_1_type_revoke_send_buf;
- revoke_pkt->msgs.vers_1_msgs.revoke_send_buf.id =
- NETVSC_SEND_BUFFER_ID;
-
- ret = vmbus_chan_send(net_dev->sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, 0,
- revoke_pkt, sizeof(nvsp_msg),
- (uint64_t)(uintptr_t)revoke_pkt);
+ struct hn_nvs_chim_disconn disconn;
+
/*
- * If we failed here, we might as well return and have a leak
- * rather than continue and a bugchk
+ * Disconnect chimney sending buffer from NVS.
*/
+ memset(&disconn, 0, sizeof(disconn));
+ disconn.nvs_type = HN_NVS_TYPE_CHIM_DISCONN;
+ disconn.nvs_sig = HN_NVS_CHIM_SIG;
+
+ /* NOTE: No response. */
+ ret = hn_nvs_send(net_dev->sc->hn_prichan,
+ VMBUS_CHANPKT_FLAG_NONE, &disconn, sizeof(disconn),
+ &hn_send_ctx_none);
if (ret != 0) {
+ if_printf(net_dev->sc->hn_ifp,
+ "send chim disconn failed: %d\n", ret);
return (ret);
}
}
@@ -454,43 +483,63 @@ hv_nv_destroy_send_buffer(netvsc_dev *net_dev)
return (ret);
}
-
-/*
- * Attempt to negotiate the caller-specified NVSP version
- *
- * For NVSP v2, Server 2008 R2 does not set
- * init_pkt->msgs.init_msgs.init_compl.negotiated_prot_vers
- * to the negotiated version, so we cannot rely on that.
- */
static int
hv_nv_negotiate_nvsp_protocol(struct hn_softc *sc, netvsc_dev *net_dev,
- uint32_t nvsp_ver)
+ uint32_t nvs_ver)
{
- nvsp_msg *init_pkt;
- int ret;
-
- init_pkt = &net_dev->channel_init_packet;
- memset(init_pkt, 0, sizeof(nvsp_msg));
- init_pkt->hdr.msg_type = nvsp_msg_type_init;
+ struct hn_send_ctx sndc;
+ struct vmbus_xact *xact;
+ struct hn_nvs_init *init;
+ const struct hn_nvs_init_resp *resp;
+ size_t resp_len;
+ uint32_t status;
+ int error;
+
+ xact = vmbus_xact_get(sc->hn_xact, sizeof(*init));
+ if (xact == NULL) {
+ if_printf(sc->hn_ifp, "no xact for nvs init\n");
+ return (ENXIO);
+ }
- /*
- * Specify parameter as the only acceptable protocol version
- */
- init_pkt->msgs.init_msgs.init.p1.protocol_version = nvsp_ver;
- init_pkt->msgs.init_msgs.init.protocol_version_2 = nvsp_ver;
+ init = vmbus_xact_req_data(xact);
+ init->nvs_type = HN_NVS_TYPE_INIT;
+ init->nvs_ver_min = nvs_ver;
+ init->nvs_ver_max = nvs_ver;
+
+ vmbus_xact_activate(xact);
+ hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
+
+ error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC,
+ init, sizeof(*init), &sndc);
+ if (error) {
+ if_printf(sc->hn_ifp, "send nvs init failed: %d\n", error);
+ vmbus_xact_deactivate(xact);
+ vmbus_xact_put(xact);
+ return (error);
+ }
- /* Send the init request */
- ret = vmbus_chan_send(sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
- init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
- if (ret != 0)
- return (-1);
+ resp = vmbus_xact_wait(xact, &resp_len);
+ if (resp_len < sizeof(*resp)) {
+ if_printf(sc->hn_ifp, "invalid init resp length %zu\n",
+ resp_len);
+ vmbus_xact_put(xact);
+ return (EINVAL);
+ }
+ if (resp->nvs_type != HN_NVS_TYPE_INIT_RESP) {
+ if_printf(sc->hn_ifp, "not init resp, type %u\n",
+ resp->nvs_type);
+ vmbus_xact_put(xact);
+ return (EINVAL);
+ }
- sema_wait(&net_dev->channel_init_sema);
+ status = resp->nvs_status;
+ vmbus_xact_put(xact);
- if (init_pkt->msgs.init_msgs.init_compl.status != nvsp_status_success)
+ if (status != HN_NVS_STATUS_OK) {
+ if_printf(sc->hn_ifp, "nvs init failed for ver 0x%x\n",
+ nvs_ver);
return (EINVAL);
-
+ }
return (0);
}
@@ -502,33 +551,20 @@ hv_nv_negotiate_nvsp_protocol(struct hn_softc *sc, netvsc_dev *net_dev,
static int
hv_nv_send_ndis_config(struct hn_softc *sc, uint32_t mtu)
{
- netvsc_dev *net_dev;
- nvsp_msg *init_pkt;
- int ret;
-
- net_dev = hv_nv_get_outbound_net_device(sc);
- if (!net_dev)
- return (-ENODEV);
-
- /*
- * Set up configuration packet, write MTU
- * Indicate we are capable of handling VLAN tags
- */
- init_pkt = &net_dev->channel_init_packet;
- memset(init_pkt, 0, sizeof(nvsp_msg));
- init_pkt->hdr.msg_type = nvsp_msg_2_type_send_ndis_config;
- init_pkt->msgs.vers_2_msgs.send_ndis_config.mtu = mtu;
- init_pkt->
- msgs.vers_2_msgs.send_ndis_config.capabilities.u1.u2.ieee8021q
- = 1;
-
- /* Send the configuration packet */
- ret = vmbus_chan_send(sc->hn_prichan, VMBUS_CHANPKT_TYPE_INBAND, 0,
- init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
- if (ret != 0)
- return (-EINVAL);
-
- return (0);
+ struct hn_nvs_ndis_conf conf;
+ int error;
+
+ memset(&conf, 0, sizeof(conf));
+ conf.nvs_type = HN_NVS_TYPE_NDIS_CONF;
+ conf.nvs_mtu = mtu;
+ conf.nvs_caps = HN_NVS_NDIS_CONF_VLAN;
+
+ /* NOTE: No response. */
+ error = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_NONE,
+ &conf, sizeof(conf), &hn_send_ctx_none);
+ if (error)
+ if_printf(sc->hn_ifp, "send nvs ndis conf failed: %d\n", error);
+ return (error);
}
/*
@@ -538,8 +574,6 @@ static int
hv_nv_connect_to_vsp(struct hn_softc *sc)
{
netvsc_dev *net_dev;
- nvsp_msg *init_pkt;
- uint32_t ndis_version;
uint32_t protocol_list[] = { NVSP_PROTOCOL_VERSION_1,
NVSP_PROTOCOL_VERSION_2,
NVSP_PROTOCOL_VERSION_4,
@@ -549,6 +583,7 @@ hv_nv_connect_to_vsp(struct hn_softc *sc)
int ret = 0;
device_t dev = sc->hn_dev;
struct ifnet *ifp = sc->hn_ifp;
+ struct hn_nvs_ndis_init ndis;
net_dev = hv_nv_get_outbound_net_device(sc);
@@ -581,37 +616,24 @@ hv_nv_connect_to_vsp(struct hn_softc *sc)
ret = hv_nv_send_ndis_config(sc, ifp->if_mtu);
/*
- * Send the NDIS version
+ * Initialize NDIS.
*/
- init_pkt = &net_dev->channel_init_packet;
- memset(init_pkt, 0, sizeof(nvsp_msg));
-
- if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4) {
- ndis_version = NDIS_VERSION_6_1;
- } else {
- ndis_version = NDIS_VERSION_6_30;
- }
-
- init_pkt->hdr.msg_type = nvsp_msg_1_type_send_ndis_vers;
- init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_major_vers =
- (ndis_version & 0xFFFF0000) >> 16;
- init_pkt->msgs.vers_1_msgs.send_ndis_vers.ndis_minor_vers =
- ndis_version & 0xFFFF;
-
- /* Send the init request */
+ memset(&ndis, 0, sizeof(ndis));
+ ndis.nvs_type = HN_NVS_TYPE_NDIS_INIT;
+ ndis.nvs_ndis_major = NDIS_VERSION_MAJOR_6;
+ if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_4)
+ ndis.nvs_ndis_minor = NDIS_VERSION_MINOR_1;
+ else
+ ndis.nvs_ndis_minor = NDIS_VERSION_MINOR_30;
- ret = vmbus_chan_send(sc->hn_prichan, VMBUS_CHANPKT_TYPE_INBAND, 0,
- init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
+ /* NOTE: No response. */
+ ret = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_NONE,
+ &ndis, sizeof(ndis), &hn_send_ctx_none);
if (ret != 0) {
+ if_printf(sc->hn_ifp, "send nvs ndis init failed: %d\n", ret);
goto cleanup;
}
- /*
- * TODO: BUGBUG - We have to wait for the above msg since the netvsp
- * uses KMCL which acknowledges packet (completion packet)
- * since our Vmbus always set the VMBUS_CHANPKT_FLAG_RC flag
- */
- /* sema_wait(&NetVscChannel->channel_init_sema); */
/* Post the big receive buffer to NetVSP */
if (net_dev->nvsp_version <= NVSP_PROTOCOL_VERSION_2)
@@ -731,6 +753,42 @@ hv_nv_on_device_remove(struct hn_softc *sc, boolean_t destroy_channel)
return (0);
}
+void
+hn_nvs_sent_xact(struct hn_send_ctx *sndc,
+ struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
+ const struct nvsp_msg_ *msg, int dlen)
+{
+
+ vmbus_xact_wakeup(sndc->hn_cbarg, msg, dlen);
+}
+
+static void
+hn_nvs_sent_none(struct hn_send_ctx *sndc __unused,
+ struct netvsc_dev_ *net_dev __unused, struct vmbus_channel *chan __unused,
+ const struct nvsp_msg_ *msg __unused, int dlen __unused)
+{
+ /* EMPTY */
+}
+
+void
+hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx)
+{
+ u_long mask;
+ uint32_t idx;
+
+ idx = chim_idx / BITS_PER_LONG;
+ KASSERT(idx < net_dev->bitsmap_words,
+ ("invalid chimney index 0x%x", chim_idx));
+
+ mask = 1UL << (chim_idx % BITS_PER_LONG);
+ KASSERT(net_dev->send_section_bitsmap[idx] & mask,
+ ("index bitmap 0x%lx, chimney index %u, "
+ "bitmap idx %d, bitmask 0x%lx",
+ net_dev->send_section_bitsmap[idx], chim_idx, idx, mask));
+
+ atomic_clear_long(&net_dev->send_section_bitsmap[idx], mask);
+}
+
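/*
 * A minimal sketch of the chimney slot life cycle (assumes the
 * surrounding driver context): a slot claimed from the bitmap by
 * hv_nv_get_next_send_section() is released by hn_chim_free(),
 * normally from the send-completion callback.
 */
static void
hn_chim_cycle_sketch(netvsc_dev *net_dev)
{
	uint32_t idx;

	idx = hv_nv_get_next_send_section(net_dev);
	if (idx == HN_NVS_CHIM_IDX_INVALID)
		return;		/* no free slot; fall back to sglist send */
	/* ... copy the RNDIS message and payload into slot 'idx' ... */
	hn_chim_free(net_dev, idx);	/* normally from the tx callback */
}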
/*
* Net VSC on send completion
*/
@@ -738,59 +796,16 @@ static void
hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
const struct vmbus_chanpkt_hdr *pkt)
{
- const nvsp_msg *nvsp_msg_pkt;
- netvsc_packet *net_vsc_pkt;
-
- nvsp_msg_pkt = VMBUS_CHANPKT_CONST_DATA(pkt);
-
- if (nvsp_msg_pkt->hdr.msg_type == nvsp_msg_type_init_complete
- || nvsp_msg_pkt->hdr.msg_type
- == nvsp_msg_1_type_send_rx_buf_complete
- || nvsp_msg_pkt->hdr.msg_type
- == nvsp_msg_1_type_send_send_buf_complete
- || nvsp_msg_pkt->hdr.msg_type
- == nvsp_msg5_type_subchannel) {
- /* Copy the response back */
- memcpy(&net_dev->channel_init_packet, nvsp_msg_pkt,
- sizeof(nvsp_msg));
- sema_post(&net_dev->channel_init_sema);
- } else if (nvsp_msg_pkt->hdr.msg_type ==
- nvsp_msg_1_type_send_rndis_pkt_complete) {
- /* Get the send context */
- net_vsc_pkt =
- (netvsc_packet *)(unsigned long)pkt->cph_xactid;
- if (NULL != net_vsc_pkt) {
- if (net_vsc_pkt->send_buf_section_idx !=
- NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
- u_long mask;
- int idx;
-
- idx = net_vsc_pkt->send_buf_section_idx /
- BITS_PER_LONG;
- KASSERT(idx < net_dev->bitsmap_words,
- ("invalid section index %u",
- net_vsc_pkt->send_buf_section_idx));
- mask = 1UL <<
- (net_vsc_pkt->send_buf_section_idx %
- BITS_PER_LONG);
-
- KASSERT(net_dev->send_section_bitsmap[idx] &
- mask,
- ("index bitmap 0x%lx, section index %u, "
- "bitmap idx %d, bitmask 0x%lx",
- net_dev->send_section_bitsmap[idx],
- net_vsc_pkt->send_buf_section_idx,
- idx, mask));
- atomic_clear_long(
- &net_dev->send_section_bitsmap[idx], mask);
- }
-
- /* Notify the layer above us */
- net_vsc_pkt->compl.send.on_send_completion(chan,
- net_vsc_pkt->compl.send.send_completion_context);
+ struct hn_send_ctx *sndc;
- }
- }
+ sndc = (struct hn_send_ctx *)(uintptr_t)pkt->cph_xactid;
+ sndc->hn_cb(sndc, net_dev, chan, VMBUS_CHANPKT_CONST_DATA(pkt),
+ VMBUS_CHANPKT_DATALEN(pkt));
+ /*
+ * NOTE:
+ * 'sndc' must not be accessed from here on, since it may already
+ * have been freed by its callback.
+ */
}
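/*
 * The rule above in miniature (hypothetical snippet; 'data' and
 * 'dlen' stand for the decoded packet payload).
 */
uint32_t idx = sndc->hn_chim_idx;	/* safe: sndc still live */

sndc->hn_cb(sndc, net_dev, chan, data, dlen);
/* 'sndc' may now be freed; use 'idx', not sndc->hn_chim_idx. */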
/*
@@ -799,32 +814,23 @@ hv_nv_on_send_completion(netvsc_dev *net_dev, struct vmbus_channel *chan,
* Returns 0 on success, non-zero on failure.
*/
int
-hv_nv_on_send(struct vmbus_channel *chan, netvsc_packet *pkt)
+hv_nv_on_send(struct vmbus_channel *chan, uint32_t rndis_mtype,
+ struct hn_send_ctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt)
{
- nvsp_msg send_msg;
+ struct hn_nvs_rndis rndis;
int ret;
- send_msg.hdr.msg_type = nvsp_msg_1_type_send_rndis_pkt;
- if (pkt->is_data_pkt) {
- /* 0 is RMC_DATA */
- send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 0;
- } else {
- /* 1 is RMC_CONTROL */
- send_msg.msgs.vers_1_msgs.send_rndis_pkt.chan_type = 1;
- }
+ rndis.nvs_type = HN_NVS_TYPE_RNDIS;
+ rndis.nvs_rndis_mtype = rndis_mtype;
+ rndis.nvs_chim_idx = sndc->hn_chim_idx;
+ rndis.nvs_chim_sz = sndc->hn_chim_sz;
- send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_idx =
- pkt->send_buf_section_idx;
- send_msg.msgs.vers_1_msgs.send_rndis_pkt.send_buf_section_size =
- pkt->send_buf_section_size;
-
- if (pkt->gpa_cnt) {
- ret = vmbus_chan_send_sglist(chan, pkt->gpa, pkt->gpa_cnt,
- &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
+ if (gpa_cnt) {
+ ret = hn_nvs_send_sglist(chan, gpa, gpa_cnt,
+ &rndis, sizeof(rndis), sndc);
} else {
- ret = vmbus_chan_send(chan,
- VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
- &send_msg, sizeof(nvsp_msg), (uint64_t)(uintptr_t)pkt);
+ ret = hn_nvs_send(chan, VMBUS_CHANPKT_FLAG_RC,
+ &rndis, sizeof(rndis), sndc);
}
return (ret);
@@ -841,19 +847,18 @@ hv_nv_on_receive(netvsc_dev *net_dev, struct hn_rx_ring *rxr,
struct vmbus_channel *chan, const struct vmbus_chanpkt_hdr *pkthdr)
{
const struct vmbus_chanpkt_rxbuf *pkt;
- const nvsp_msg *nvsp_msg_pkt;
+ const struct hn_nvs_hdr *nvs_hdr;
netvsc_packet vsc_pkt;
netvsc_packet *net_vsc_pkt = &vsc_pkt;
int count = 0;
int i = 0;
int status = nvsp_status_success;
- nvsp_msg_pkt = VMBUS_CHANPKT_CONST_DATA(pkthdr);
-
- /* Make sure this is a valid nvsp packet */
- if (nvsp_msg_pkt->hdr.msg_type != nvsp_msg_1_type_send_rndis_pkt) {
- if_printf(rxr->hn_ifp, "packet hdr type %u is invalid!\n",
- nvsp_msg_pkt->hdr.msg_type);
+ /* Make sure that this is a RNDIS message. */
+ nvs_hdr = VMBUS_CHANPKT_CONST_DATA(pkthdr);
+ if (__predict_false(nvs_hdr->nvs_type != HN_NVS_TYPE_RNDIS)) {
+ if_printf(rxr->hn_ifp, "nvs type %u, not RNDIS\n",
+ nvs_hdr->nvs_type);
return;
}
diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h
index 623166875d84..c5dc3b2b9adb 100644
--- a/sys/dev/hyperv/netvsc/hv_net_vsc.h
+++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h
@@ -44,6 +44,7 @@
#include <sys/malloc.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
+#include <sys/sema.h>
#include <sys/sx.h>
#include <machine/bus.h>
@@ -1059,7 +1060,6 @@ typedef struct netvsc_dev_ {
uint32_t rx_buf_size;
uint32_t rx_buf_gpadl_handle;
uint32_t rx_section_count;
- nvsp_1_rx_buf_section *rx_sections;
/* Used for NetVSP initialization protocol */
struct sema channel_init_sema;
@@ -1087,6 +1087,7 @@ struct vmbus_channel;
typedef void (*pfn_on_send_rx_completion)(struct vmbus_channel *, void *);
#define NETVSC_DEVICE_RING_BUFFER_SIZE (128 * PAGE_SIZE)
+#define NETVSC_PACKET_MAXPAGE 32
#define NETVSC_VLAN_PRIO_MASK 0xe000
#define NETVSC_VLAN_PRIO_SHIFT 13
@@ -1110,33 +1111,10 @@ typedef void (*pfn_on_send_rx_completion)(struct vmbus_channel *, void *);
#endif
typedef struct netvsc_packet_ {
- uint8_t is_data_pkt; /* One byte */
- uint16_t vlan_tci;
- uint32_t status;
-
- /* Completion */
- union {
- struct {
- uint64_t rx_completion_tid;
- void *rx_completion_context;
- /* This is no longer used */
- pfn_on_send_rx_completion on_rx_completion;
- } rx;
- struct {
- uint64_t send_completion_tid;
- void *send_completion_context;
- /* Still used in netvsc and filter code */
- pfn_on_send_rx_completion on_send_completion;
- } send;
- } compl;
- uint32_t send_buf_section_idx;
- uint32_t send_buf_section_size;
-
- void *rndis_mesg;
+ uint16_t vlan_tci;
+ uint32_t status;
uint32_t tot_data_buf_len;
void *data;
- uint32_t gpa_cnt;
- struct vmbus_gpa gpa[VMBUS_CHAN_SGLIST_MAX];
} netvsc_packet;
typedef struct {
@@ -1214,6 +1192,9 @@ struct hn_tx_ring {
bus_dma_tag_t hn_tx_data_dtag;
uint64_t hn_csum_assist;
+ int hn_gpa_cnt;
+ struct vmbus_gpa hn_gpa[NETVSC_PACKET_MAXPAGE];
+
u_long hn_no_txdescs;
u_long hn_send_failed;
u_long hn_txdma_failed;
@@ -1261,19 +1242,22 @@ typedef struct hn_softc {
struct taskqueue *hn_tx_taskq;
struct sysctl_oid *hn_tx_sysctl_tree;
struct sysctl_oid *hn_rx_sysctl_tree;
+ struct vmbus_xact_ctx *hn_xact;
} hn_softc_t;
/*
* Externs
*/
extern int hv_promisc_mode;
+struct hn_send_ctx;
void netvsc_linkstatus_callback(struct hn_softc *sc, uint32_t status);
netvsc_dev *hv_nv_on_device_add(struct hn_softc *sc,
void *additional_info, struct hn_rx_ring *rxr);
int hv_nv_on_device_remove(struct hn_softc *sc,
boolean_t destroy_channel);
-int hv_nv_on_send(struct vmbus_channel *chan, netvsc_packet *pkt);
+int hv_nv_on_send(struct vmbus_channel *chan, uint32_t rndis_mtype,
+ struct hn_send_ctx *sndc, struct vmbus_gpa *gpa, int gpa_cnt);
int hv_nv_get_next_send_section(netvsc_dev *net_dev);
void hv_nv_subchan_attach(struct vmbus_channel *chan,
struct hn_rx_ring *rxr);
diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
index adabe8c4c32b..7cf9c54f60d9 100644
--- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c
@@ -69,6 +69,7 @@ __FBSDID("$FreeBSD$");
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/sx.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/buf_ring.h>
@@ -114,6 +115,7 @@ __FBSDID("$FreeBSD$");
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
+#include <dev/hyperv/include/vmbus_xact.h>
#include "hv_net_vsc.h"
#include "hv_rndis.h"
@@ -123,6 +125,9 @@ __FBSDID("$FreeBSD$");
/* Short for Hyper-V network interface */
#define NETVSC_DEVNAME "hn"
+#define HN_XACT_REQ_SIZE (2 * PAGE_SIZE)
+#define HN_XACT_RESP_SIZE (2 * PAGE_SIZE)
+
/*
* It looks like offset 0 of buf is reserved to hold the softc pointer.
* The sc pointer is evidently not needed, and is not presently populated.
@@ -151,7 +156,7 @@ __FBSDID("$FreeBSD$");
#define HN_TX_DATA_MAXSIZE IP_MAXPACKET
#define HN_TX_DATA_SEGSIZE PAGE_SIZE
#define HN_TX_DATA_SEGCNT_MAX \
- (VMBUS_CHAN_SGLIST_MAX - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
+ (NETVSC_PACKET_MAXPAGE - HV_RF_NUM_TX_RESERVED_PAGE_BUFS)
#define HN_DIRECT_TX_SIZE_DEF 128
@@ -165,7 +170,7 @@ struct hn_txdesc {
struct hn_tx_ring *txr;
int refs;
uint32_t flags; /* HN_TXD_FLAG_ */
- netvsc_packet netvsc_pkt; /* XXX to be removed */
+ struct hn_send_ctx send_ctx;
bus_dmamap_t data_dmap;
@@ -541,6 +546,11 @@ netvsc_attach(device_t dev)
IFCAP_LRO;
ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO;
+ sc->hn_xact = vmbus_xact_ctx_create(bus_get_dma_tag(dev),
+ HN_XACT_REQ_SIZE, HN_XACT_RESP_SIZE, 0);
+ if (sc->hn_xact == NULL)
+ goto failed;
+
error = hv_rf_on_device_add(sc, &device_info, ring_cnt,
&sc->hn_rx_ring[0]);
if (error)
@@ -642,6 +652,7 @@ netvsc_detach(device_t dev)
if (sc->hn_tx_taskq != hn_tx_taskq)
taskqueue_free(sc->hn_tx_taskq);
+ vmbus_xact_ctx_destroy(sc->hn_xact);
return (0);
}
@@ -780,14 +791,15 @@ hn_txeof(struct hn_tx_ring *txr)
}
static void
-hn_tx_done(struct vmbus_channel *chan, void *xpkt)
+hn_tx_done(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
+ struct vmbus_channel *chan, const struct nvsp_msg_ *msg __unused,
+ int dlen __unused)
{
- netvsc_packet *packet = xpkt;
- struct hn_txdesc *txd;
+ struct hn_txdesc *txd = sndc->hn_cbarg;
struct hn_tx_ring *txr;
- txd = (struct hn_txdesc *)(uintptr_t)
- packet->compl.send.send_completion_tid;
+ if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
+ hn_chim_free(net_dev, sndc->hn_chim_idx);
txr = txd->txr;
KASSERT(txr->hn_chan == chan,
@@ -834,16 +846,14 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX];
int error, nsegs, i;
struct mbuf *m_head = *m_head0;
- netvsc_packet *packet;
rndis_msg *rndis_mesg;
rndis_packet *rndis_pkt;
rndis_per_packet_info *rppi;
struct rndis_hash_value *hash_value;
- uint32_t rndis_msg_size;
+ uint32_t rndis_msg_size, tot_data_buf_len, send_buf_section_idx;
+ int send_buf_section_size;
- packet = &txd->netvsc_pkt;
- packet->is_data_pkt = TRUE;
- packet->tot_data_buf_len = m_head->m_pkthdr.len;
+ tot_data_buf_len = m_head->m_pkthdr.len;
/*
* extension points to the area reserved for the
@@ -858,7 +868,7 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
rndis_pkt = &rndis_mesg->msg.packet;
rndis_pkt->data_offset = sizeof(rndis_packet);
- rndis_pkt->data_length = packet->tot_data_buf_len;
+ rndis_pkt->data_length = tot_data_buf_len;
rndis_pkt->per_pkt_info_offset = sizeof(rndis_packet);
rndis_msg_size = RNDIS_MESSAGE_SIZE(rndis_packet);
@@ -966,21 +976,19 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
}
}
- rndis_mesg->msg_len = packet->tot_data_buf_len + rndis_msg_size;
- packet->tot_data_buf_len = rndis_mesg->msg_len;
+ rndis_mesg->msg_len = tot_data_buf_len + rndis_msg_size;
+ tot_data_buf_len = rndis_mesg->msg_len;
/*
* Chimney send, if the packet could fit into one chimney buffer.
*/
- if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) {
+ if (tot_data_buf_len < txr->hn_tx_chimney_size) {
netvsc_dev *net_dev = txr->hn_sc->net_dev;
- uint32_t send_buf_section_idx;
txr->hn_tx_chimney_tried++;
send_buf_section_idx =
hv_nv_get_next_send_section(net_dev);
- if (send_buf_section_idx !=
- NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
+ if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
uint8_t *dest = ((uint8_t *)net_dev->send_buf +
(send_buf_section_idx *
net_dev->send_section_size));
@@ -989,10 +997,8 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
dest += rndis_msg_size;
m_copydata(m_head, 0, m_head->m_pkthdr.len, dest);
- packet->send_buf_section_idx = send_buf_section_idx;
- packet->send_buf_section_size =
- packet->tot_data_buf_len;
- packet->gpa_cnt = 0;
+ send_buf_section_size = tot_data_buf_len;
+ txr->hn_gpa_cnt = 0;
txr->hn_tx_chimney++;
goto done;
}
@@ -1018,19 +1024,19 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
}
*m_head0 = m_head;
- packet->gpa_cnt = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
+ txr->hn_gpa_cnt = nsegs + HV_RF_NUM_TX_RESERVED_PAGE_BUFS;
/* send packet with page buffer */
- packet->gpa[0].gpa_page = atop(txd->rndis_msg_paddr);
- packet->gpa[0].gpa_ofs = txd->rndis_msg_paddr & PAGE_MASK;
- packet->gpa[0].gpa_len = rndis_msg_size;
+ txr->hn_gpa[0].gpa_page = atop(txd->rndis_msg_paddr);
+ txr->hn_gpa[0].gpa_ofs = txd->rndis_msg_paddr & PAGE_MASK;
+ txr->hn_gpa[0].gpa_len = rndis_msg_size;
/*
* Fill the page buffers with mbuf info starting at index
* HV_RF_NUM_TX_RESERVED_PAGE_BUFS.
*/
for (i = 0; i < nsegs; ++i) {
- struct vmbus_gpa *gpa = &packet->gpa[
+ struct vmbus_gpa *gpa = &txr->hn_gpa[
i + HV_RF_NUM_TX_RESERVED_PAGE_BUFS];
gpa->gpa_page = atop(segs[i].ds_addr);
@@ -1038,16 +1044,14 @@ hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0)
gpa->gpa_len = segs[i].ds_len;
}
- packet->send_buf_section_idx =
- NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
- packet->send_buf_section_size = 0;
+ send_buf_section_idx = HN_NVS_CHIM_IDX_INVALID;
+ send_buf_section_size = 0;
done:
txd->m = m_head;
/* Set the completion routine */
- packet->compl.send.on_send_completion = hn_tx_done;
- packet->compl.send.send_completion_context = packet;
- packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd;
+ hn_send_ctx_init(&txd->send_ctx, hn_tx_done, txd,
+ send_buf_section_idx, send_buf_section_size);
return 0;
}
@@ -1067,7 +1071,8 @@ again:
* Make sure that txd is not freed before ETHER_BPF_MTAP.
*/
hn_txdesc_hold(txd);
- error = hv_nv_on_send(txr->hn_chan, &txd->netvsc_pkt);
+ error = hv_nv_on_send(txr->hn_chan, HN_NVS_RNDIS_MTYPE_DATA,
+ &txd->send_ctx, txr->hn_gpa, txr->hn_gpa_cnt);
if (!error) {
ETHER_BPF_MTAP(ifp, txd->m);
if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
diff --git a/sys/dev/hyperv/netvsc/hv_rndis.h b/sys/dev/hyperv/netvsc/hv_rndis.h
index da2b408c6494..6527668de9fe 100644
--- a/sys/dev/hyperv/netvsc/hv_rndis.h
+++ b/sys/dev/hyperv/netvsc/hv_rndis.h
@@ -41,6 +41,10 @@
#define NDIS_VERSION_6_1 0x00060001
#define NDIS_VERSION_6_30 0x0006001e
+#define NDIS_VERSION_MAJOR_6 6
+#define NDIS_VERSION_MINOR_1 1
+#define NDIS_VERSION_MINOR_30 30
+
#define NDIS_VERSION (NDIS_VERSION_5_1)
/*
diff --git a/sys/dev/hyperv/netvsc/hv_rndis_filter.c b/sys/dev/hyperv/netvsc/hv_rndis_filter.c
index 7426b8926cdb..644b017c0516 100644
--- a/sys/dev/hyperv/netvsc/hv_rndis_filter.c
+++ b/sys/dev/hyperv/netvsc/hv_rndis_filter.c
@@ -46,10 +46,11 @@ __FBSDID("$FreeBSD$");
#include <vm/pmap.h>
#include <dev/hyperv/include/hyperv.h>
-#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
-#include "hv_net_vsc.h"
-#include "hv_rndis.h"
-#include "hv_rndis_filter.h"
+#include <dev/hyperv/include/vmbus_xact.h>
+#include <dev/hyperv/netvsc/hv_net_vsc.h>
+#include <dev/hyperv/netvsc/hv_rndis.h>
+#include <dev/hyperv/netvsc/hv_rndis_filter.h>
+#include <dev/hyperv/netvsc/if_hnreg.h>
struct hv_rf_recvinfo {
const ndis_8021q_info *vlan_info;
@@ -86,11 +87,17 @@ static int hv_rf_set_packet_filter(rndis_device *device, uint32_t new_filter);
static int hv_rf_init_device(rndis_device *device);
static int hv_rf_open_device(rndis_device *device);
static int hv_rf_close_device(rndis_device *device);
-static void hv_rf_on_send_request_completion(struct vmbus_channel *, void *context);
-static void hv_rf_on_send_request_halt_completion(struct vmbus_channel *, void *context);
int
hv_rf_send_offload_request(struct hn_softc *sc,
rndis_offload_params *offloads);
+
+static void hn_rndis_sent_halt(struct hn_send_ctx *sndc,
+ struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
+ const struct nvsp_msg_ *msg, int dlen);
+static void hn_rndis_sent_cb(struct hn_send_ctx *sndc,
+ struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
+ const struct nvsp_msg_ *msg, int dlen);
+
/*
* Set the Per-Packet-Info with the specified type
*/
@@ -239,66 +246,56 @@ static int
hv_rf_send_request(rndis_device *device, rndis_request *request,
uint32_t message_type)
{
- int ret;
- netvsc_packet *packet;
netvsc_dev *net_dev = device->net_dev;
- int send_buf_section_idx;
+ uint32_t send_buf_section_idx, tot_data_buf_len;
+ struct vmbus_gpa gpa[2];
+ int gpa_cnt, send_buf_section_size;
+ hn_sent_callback_t cb;
/* Set up the packet to send it */
- packet = &request->pkt;
-
- packet->is_data_pkt = FALSE;
- packet->tot_data_buf_len = request->request_msg.msg_len;
- packet->gpa_cnt = 1;
-
- packet->gpa[0].gpa_page =
- hv_get_phys_addr(&request->request_msg) >> PAGE_SHIFT;
- packet->gpa[0].gpa_len = request->request_msg.msg_len;
- packet->gpa[0].gpa_ofs =
- (unsigned long)&request->request_msg & (PAGE_SIZE - 1);
-
- if (packet->gpa[0].gpa_ofs + packet->gpa[0].gpa_len > PAGE_SIZE) {
- packet->gpa_cnt = 2;
- packet->gpa[0].gpa_len = PAGE_SIZE - packet->gpa[0].gpa_ofs;
- packet->gpa[1].gpa_page =
- hv_get_phys_addr((char*)&request->request_msg +
- packet->gpa[0].gpa_len) >> PAGE_SHIFT;
- packet->gpa[1].gpa_ofs = 0;
- packet->gpa[1].gpa_len = request->request_msg.msg_len -
- packet->gpa[0].gpa_len;
+ tot_data_buf_len = request->request_msg.msg_len;
+
+ gpa_cnt = 1;
+ gpa[0].gpa_page = hv_get_phys_addr(&request->request_msg) >> PAGE_SHIFT;
+ gpa[0].gpa_len = request->request_msg.msg_len;
+ gpa[0].gpa_ofs = (unsigned long)&request->request_msg & (PAGE_SIZE - 1);
+
+ if (gpa[0].gpa_ofs + gpa[0].gpa_len > PAGE_SIZE) {
+ gpa_cnt = 2;
+ gpa[0].gpa_len = PAGE_SIZE - gpa[0].gpa_ofs;
+ gpa[1].gpa_page =
+ hv_get_phys_addr((char*)&request->request_msg +
+ gpa[0].gpa_len) >> PAGE_SHIFT;
+ gpa[1].gpa_ofs = 0;
+ gpa[1].gpa_len = request->request_msg.msg_len - gpa[0].gpa_len;
}
- packet->compl.send.send_completion_context = request; /* packet */
- if (message_type != REMOTE_NDIS_HALT_MSG) {
- packet->compl.send.on_send_completion =
- hv_rf_on_send_request_completion;
- } else {
- packet->compl.send.on_send_completion =
- hv_rf_on_send_request_halt_completion;
- }
- packet->compl.send.send_completion_tid = (unsigned long)device;
- if (packet->tot_data_buf_len < net_dev->send_section_size) {
+ if (message_type != REMOTE_NDIS_HALT_MSG)
+ cb = hn_rndis_sent_cb;
+ else
+ cb = hn_rndis_sent_halt;
+
+ if (tot_data_buf_len < net_dev->send_section_size) {
send_buf_section_idx = hv_nv_get_next_send_section(net_dev);
- if (send_buf_section_idx !=
- NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX) {
+ if (send_buf_section_idx != HN_NVS_CHIM_IDX_INVALID) {
char *dest = ((char *)net_dev->send_buf +
send_buf_section_idx * net_dev->send_section_size);
memcpy(dest, &request->request_msg, request->request_msg.msg_len);
- packet->send_buf_section_idx = send_buf_section_idx;
- packet->send_buf_section_size = packet->tot_data_buf_len;
- packet->gpa_cnt = 0;
+ send_buf_section_size = tot_data_buf_len;
+ gpa_cnt = 0;
goto sendit;
}
/* Failed to allocate chimney send buffer; move on */
}
- packet->send_buf_section_idx = NVSP_1_CHIMNEY_SEND_INVALID_SECTION_INDEX;
- packet->send_buf_section_size = 0;
+ send_buf_section_idx = HN_NVS_CHIM_IDX_INVALID;
+ send_buf_section_size = 0;
sendit:
- ret = hv_nv_on_send(device->net_dev->sc->hn_prichan, packet);
-
- return (ret);
+ hn_send_ctx_init(&request->send_ctx, cb, request,
+ send_buf_section_idx, send_buf_section_size);
+ return hv_nv_on_send(device->net_dev->sc->hn_prichan,
+ HN_NVS_RNDIS_MTYPE_CTRL, &request->send_ctx, gpa, gpa_cnt);
}
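/*
 * Worked example of the page-boundary split above (numbers assumed):
 * a 100-byte request_msg whose physical address ends in 0xff0 spills
 * into the next page and is described by two GPA entries.
 */
gpa[0].gpa_ofs = 0xff0;			/* 4080 */
gpa[0].gpa_len = PAGE_SIZE - 0xff0;	/* 16 bytes left in page 0 */
gpa[1].gpa_ofs = 0;
gpa[1].gpa_len = 100 - 16;		/* remaining 84 bytes in page 1 */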
/*
@@ -1060,15 +1057,20 @@ int
hv_rf_on_device_add(struct hn_softc *sc, void *additl_info,
int nchan, struct hn_rx_ring *rxr)
{
+ struct hn_send_ctx sndc;
int ret;
netvsc_dev *net_dev;
rndis_device *rndis_dev;
- nvsp_msg *init_pkt;
rndis_offload_params offloads;
struct rndis_recv_scale_cap rsscaps;
uint32_t rsscaps_size = sizeof(struct rndis_recv_scale_cap);
netvsc_device_info *dev_info = (netvsc_device_info *)additl_info;
device_t dev = sc->hn_dev;
+ struct hn_nvs_subch_req *req;
+ const struct hn_nvs_subch_resp *resp;
+ size_t resp_len;
+ struct vmbus_xact *xact;
+ uint32_t status, nsubch;
rndis_dev = hv_get_rndis_device();
if (rndis_dev == NULL) {
@@ -1156,35 +1158,64 @@ hv_rf_on_device_add(struct hn_softc *sc, void *additl_info,
goto out;
}
- /* request host to create sub channels */
- init_pkt = &net_dev->channel_init_packet;
- memset(init_pkt, 0, sizeof(nvsp_msg));
-
- init_pkt->hdr.msg_type = nvsp_msg5_type_subchannel;
- init_pkt->msgs.vers_5_msgs.subchannel_request.op =
- NVSP_SUBCHANNE_ALLOCATE;
- init_pkt->msgs.vers_5_msgs.subchannel_request.num_subchannels =
- net_dev->num_channel - 1;
-
- ret = vmbus_chan_send(sc->hn_prichan,
- VMBUS_CHANPKT_TYPE_INBAND, VMBUS_CHANPKT_FLAG_RC,
- init_pkt, sizeof(nvsp_msg), (uint64_t)(uintptr_t)init_pkt);
- if (ret != 0) {
- device_printf(dev, "Fail to allocate subchannel\n");
+ /*
+ * Ask NVS to allocate sub-channels.
+ */
+ xact = vmbus_xact_get(sc->hn_xact, sizeof(*req));
+ if (xact == NULL) {
+ if_printf(sc->hn_ifp, "no xact for nvs subch req\n");
+ ret = ENXIO;
goto out;
}
- sema_wait(&net_dev->channel_init_sema);
+ req = vmbus_xact_req_data(xact);
+ req->nvs_type = HN_NVS_TYPE_SUBCH_REQ;
+ req->nvs_op = HN_NVS_SUBCH_OP_ALLOC;
+ req->nvs_nsubch = net_dev->num_channel - 1;
+
+ hn_send_ctx_init_simple(&sndc, hn_nvs_sent_xact, xact);
+ vmbus_xact_activate(xact);
- if (init_pkt->msgs.vers_5_msgs.subchn_complete.status !=
- nvsp_status_success) {
- ret = ENODEV;
- device_printf(dev, "sub channel complete error\n");
+ ret = hn_nvs_send(sc->hn_prichan, VMBUS_CHANPKT_FLAG_RC,
+ req, sizeof(*req), &sndc);
+ if (ret != 0) {
+ if_printf(sc->hn_ifp, "send nvs subch req failed: %d\n", ret);
+ vmbus_xact_deactivate(xact);
+ vmbus_xact_put(xact);
goto out;
}
- net_dev->num_channel = 1 +
- init_pkt->msgs.vers_5_msgs.subchn_complete.num_subchannels;
+ resp = vmbus_xact_wait(xact, &resp_len);
+ if (resp_len < sizeof(*resp)) {
+ if_printf(sc->hn_ifp, "invalid subch resp length %zu\n",
+ resp_len);
+ vmbus_xact_put(xact);
+ ret = EINVAL;
+ goto out;
+ }
+ if (resp->nvs_type != HN_NVS_TYPE_SUBCH_RESP) {
+ if_printf(sc->hn_ifp, "not subch resp, type %u\n",
+ resp->nvs_type);
+ vmbus_xact_put(xact);
+ ret = EINVAL;
+ goto out;
+ }
+
+ status = resp->nvs_status;
+ nsubch = resp->nvs_nsubch;
+ vmbus_xact_put(xact);
+
+ if (status != HN_NVS_STATUS_OK) {
+ if_printf(sc->hn_ifp, "subch req failed: %x\n", status);
+ ret = EIO;
+ goto out;
+ }
+ if (nsubch > net_dev->num_channel - 1) {
+ if_printf(sc->hn_ifp, "%u subchans are allocated, requested %u\n",
+ nsubch, net_dev->num_channel - 1);
+ nsubch = net_dev->num_channel - 1;
+ }
+ net_dev->num_channel = nsubch + 1;
ret = hv_rf_set_rss_param(rndis_dev, net_dev->num_channel);
@@ -1239,23 +1270,24 @@ hv_rf_on_close(struct hn_softc *sc)
return (hv_rf_close_device((rndis_device *)net_dev->extension));
}
-/*
- * RNDIS filter on send request completion callback
- */
-static void
-hv_rf_on_send_request_completion(struct vmbus_channel *chan __unused,
- void *context __unused)
+static void
+hn_rndis_sent_cb(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
+ struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
+ int dlen __unused)
{
+ if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
+ hn_chim_free(net_dev, sndc->hn_chim_idx);
}
-/*
- * RNDIS filter on send request (halt only) completion callback
- */
-static void
-hv_rf_on_send_request_halt_completion(struct vmbus_channel *chan __unused,
- void *context)
+static void
+hn_rndis_sent_halt(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
+ struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
+ int dlen __unused)
{
- rndis_request *request = context;
+ rndis_request *request = sndc->hn_cbarg;
+
+ if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
+ hn_chim_free(net_dev, sndc->hn_chim_idx);
/*
* Notify hv_rf_halt_device() about halt completion.
diff --git a/sys/dev/hyperv/netvsc/hv_rndis_filter.h b/sys/dev/hyperv/netvsc/hv_rndis_filter.h
index 2f940db33e89..ebfda20b94db 100644
--- a/sys/dev/hyperv/netvsc/hv_rndis_filter.h
+++ b/sys/dev/hyperv/netvsc/hv_rndis_filter.h
@@ -33,6 +33,7 @@
#include <sys/param.h>
#include <net/ethernet.h>
+#include <dev/hyperv/netvsc/if_hnvar.h>
/*
* Defines
@@ -75,7 +76,7 @@ typedef struct rndis_request_ {
uint8_t buf_resp[PAGE_SIZE];
/* Simplify allocation by having a netvsc packet inline */
- netvsc_packet pkt;
+ struct hn_send_ctx send_ctx;
/*
* The max request size is sizeof(rndis_msg) + PAGE_SIZE.
diff --git a/sys/dev/hyperv/netvsc/if_hnreg.h b/sys/dev/hyperv/netvsc/if_hnreg.h
new file mode 100644
index 000000000000..7d86e9e18d02
--- /dev/null
+++ b/sys/dev/hyperv/netvsc/if_hnreg.h
@@ -0,0 +1,199 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_HNREG_H_
+#define _IF_HNREG_H_
+
+#include <sys/param.h>
+#include <sys/systm.h>
+
+#define HN_NVS_RXBUF_SIG 0xcafe
+#define HN_NVS_CHIM_SIG 0xface
+
+#define HN_NVS_CHIM_IDX_INVALID 0xffffffff
+
+#define HN_NVS_RNDIS_MTYPE_DATA 0
+#define HN_NVS_RNDIS_MTYPE_CTRL 1
+
+/*
+ * NVS message transaction status codes.
+ */
+#define HN_NVS_STATUS_OK 1
+
+/*
+ * NVS request/response message types.
+ */
+#define HN_NVS_TYPE_INIT 1
+#define HN_NVS_TYPE_INIT_RESP 2
+#define HN_NVS_TYPE_NDIS_INIT 100
+#define HN_NVS_TYPE_RXBUF_CONN 101
+#define HN_NVS_TYPE_RXBUF_CONNRESP 102
+#define HN_NVS_TYPE_RXBUF_DISCONN 103
+#define HN_NVS_TYPE_CHIM_CONN 104
+#define HN_NVS_TYPE_CHIM_CONNRESP 105
+#define HN_NVS_TYPE_CHIM_DISCONN 106
+#define HN_NVS_TYPE_RNDIS 107
+#define HN_NVS_TYPE_NDIS_CONF 125
+#define HN_NVS_TYPE_SUBCH_REQ 133
+#define HN_NVS_TYPE_SUBCH_RESP 133 /* same as SUBCH_REQ */
+
+/*
+ * Any request size less than this will _not_ work; e.g. hn_nvs_init
+ * has only 12 bytes of valid data, but if only those 12 bytes were
+ * sent, the hypervisor would never reply.
+ */
+#define HN_NVS_REQSIZE_MIN 32
+
+/* NVS message common header */
+struct hn_nvs_hdr {
+ uint32_t nvs_type;
+} __packed;
+
+struct hn_nvs_init {
+ uint32_t nvs_type; /* HN_NVS_TYPE_INIT */
+ uint32_t nvs_ver_min;
+ uint32_t nvs_ver_max;
+ uint8_t nvs_rsvd[20];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_init) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_init_resp {
+ uint32_t nvs_type; /* HN_NVS_TYPE_INIT_RESP */
+ uint32_t nvs_ver; /* deprecated */
+ uint32_t nvs_rsvd;
+ uint32_t nvs_status; /* HN_NVS_STATUS_ */
+} __packed;
+
+/* No response */
+struct hn_nvs_ndis_conf {
+ uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_CONF */
+ uint32_t nvs_mtu;
+ uint32_t nvs_rsvd;
+ uint64_t nvs_caps; /* HN_NVS_NDIS_CONF_ */
+ uint8_t nvs_rsvd1[12];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_ndis_conf) >= HN_NVS_REQSIZE_MIN);
+
+#define HN_NVS_NDIS_CONF_SRIOV 0x0004
+#define HN_NVS_NDIS_CONF_VLAN 0x0008
+
+/* No response */
+struct hn_nvs_ndis_init {
+ uint32_t nvs_type; /* HN_NVS_TYPE_NDIS_INIT */
+ uint32_t nvs_ndis_major; /* NDIS_VERSION_MAJOR_ */
+ uint32_t nvs_ndis_minor; /* NDIS_VERSION_MINOR_ */
+ uint8_t nvs_rsvd[20];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_ndis_init) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_rxbuf_conn {
+ uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONN */
+ uint32_t nvs_gpadl; /* RXBUF vmbus GPADL */
+ uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */
+ uint8_t nvs_rsvd[22];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_rxbuf_conn) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_rxbuf_sect {
+ uint32_t nvs_start;
+ uint32_t nvs_slotsz;
+ uint32_t nvs_slotcnt;
+ uint32_t nvs_end;
+} __packed;
+
+struct hn_nvs_rxbuf_connresp {
+ uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_CONNRESP */
+ uint32_t nvs_status; /* HN_NVS_STATUS_ */
+ uint32_t nvs_nsect; /* # of elem in nvs_sect */
+ struct hn_nvs_rxbuf_sect nvs_sect[];
+} __packed;
+
+/* No response */
+struct hn_nvs_rxbuf_disconn {
+ uint32_t nvs_type; /* HN_NVS_TYPE_RXBUF_DISCONN */
+ uint16_t nvs_sig; /* HN_NVS_RXBUF_SIG */
+ uint8_t nvs_rsvd[26];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_rxbuf_disconn) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_chim_conn {
+ uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONN */
+ uint32_t nvs_gpadl; /* chimney buf vmbus GPADL */
+ uint16_t nvs_sig; /* HN_NVS_CHIM_SIG */
+ uint8_t nvs_rsvd[22];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_chim_conn) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_chim_connresp {
+ uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_CONNRESP */
+ uint32_t nvs_status; /* HN_NVS_STATUS_ */
+ uint32_t nvs_sectsz; /* section size */
+} __packed;
+
+/* No response */
+struct hn_nvs_chim_disconn {
+ uint32_t nvs_type; /* HN_NVS_TYPE_CHIM_DISCONN */
+ uint16_t nvs_sig; /* HN_NVS_CHIM_SIG */
+ uint8_t nvs_rsvd[26];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_chim_disconn) >= HN_NVS_REQSIZE_MIN);
+
+#define HN_NVS_SUBCH_OP_ALLOC 1
+
+struct hn_nvs_subch_req {
+ uint32_t nvs_type; /* HN_NVS_TYPE_SUBCH_REQ */
+ uint32_t nvs_op; /* HN_NVS_SUBCH_OP_ */
+ uint32_t nvs_nsubch;
+ uint8_t nvs_rsvd[20];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_subch_req) >= HN_NVS_REQSIZE_MIN);
+
+struct hn_nvs_subch_resp {
+ uint32_t nvs_type; /* HN_NVS_TYPE_SUBCH_RESP */
+ uint32_t nvs_status; /* HN_NVS_STATUS_ */
+ uint32_t nvs_nsubch;
+} __packed;
+
+struct hn_nvs_rndis {
+ uint32_t nvs_type; /* HN_NVS_TYPE_RNDIS */
+ uint32_t nvs_rndis_mtype;/* HN_NVS_RNDIS_MTYPE_ */
+ /*
+ * Chimney sending buffer index and size.
+ *
+ * NOTE:
+ * If nvs_chim_idx is set to HN_NVS_CHIM_IDX_INVALID
+ * and nvs_chim_sz is set to 0, then the chimney sending
+ * buffer is _not_ used by this RNDIS message.
+ */
+ uint32_t nvs_chim_idx;
+ uint32_t nvs_chim_sz;
+ uint8_t nvs_rsvd[16];
+} __packed;
+CTASSERT(sizeof(struct hn_nvs_rndis) >= HN_NVS_REQSIZE_MIN);
+
+#endif /* !_IF_HNREG_H_ */
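/*
 * A minimal sketch of an NVS RNDIS message built from the layout
 * above (mirrors what hv_nv_on_send() fills in): a data packet that
 * bypasses the chimney sending buffer.
 */
static const struct hn_nvs_rndis rndis_sketch = {
	.nvs_type	 = HN_NVS_TYPE_RNDIS,
	.nvs_rndis_mtype = HN_NVS_RNDIS_MTYPE_DATA,
	.nvs_chim_idx	 = HN_NVS_CHIM_IDX_INVALID,	/* no chimney */
	.nvs_chim_sz	 = 0,
};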
diff --git a/sys/dev/hyperv/netvsc/if_hnvar.h b/sys/dev/hyperv/netvsc/if_hnvar.h
new file mode 100644
index 000000000000..d6aef5fb81ca
--- /dev/null
+++ b/sys/dev/hyperv/netvsc/if_hnvar.h
@@ -0,0 +1,104 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _IF_HNVAR_H_
+#define _IF_HNVAR_H_
+
+#include <sys/param.h>
+
+#include <dev/hyperv/include/vmbus.h>
+#include <dev/hyperv/netvsc/if_hnreg.h>
+
+struct netvsc_dev_;
+struct nvsp_msg_;
+
+struct vmbus_channel;
+struct hn_send_ctx;
+
+typedef void (*hn_sent_callback_t)
+ (struct hn_send_ctx *, struct netvsc_dev_ *,
+ struct vmbus_channel *, const struct nvsp_msg_ *, int);
+
+struct hn_send_ctx {
+ hn_sent_callback_t hn_cb;
+ void *hn_cbarg;
+ uint32_t hn_chim_idx;
+ int hn_chim_sz;
+};
+
+#define HN_SEND_CTX_INITIALIZER(cb, cbarg) \
+{ \
+ .hn_cb = cb, \
+ .hn_cbarg = cbarg, \
+ .hn_chim_idx = HN_NVS_CHIM_IDX_INVALID, \
+ .hn_chim_sz = 0 \
+}
+
+static __inline void
+hn_send_ctx_init(struct hn_send_ctx *sndc, hn_sent_callback_t cb,
+ void *cbarg, uint32_t chim_idx, int chim_sz)
+{
+
+ sndc->hn_cb = cb;
+ sndc->hn_cbarg = cbarg;
+ sndc->hn_chim_idx = chim_idx;
+ sndc->hn_chim_sz = chim_sz;
+}
+
+static __inline void
+hn_send_ctx_init_simple(struct hn_send_ctx *sndc, hn_sent_callback_t cb,
+ void *cbarg)
+{
+
+ hn_send_ctx_init(sndc, cb, cbarg, HN_NVS_CHIM_IDX_INVALID, 0);
+}
+
+static __inline int
+hn_nvs_send(struct vmbus_channel *chan, uint16_t flags,
+ void *nvs_msg, int nvs_msglen, struct hn_send_ctx *sndc)
+{
+
+ return (vmbus_chan_send(chan, VMBUS_CHANPKT_TYPE_INBAND, flags,
+ nvs_msg, nvs_msglen, (uint64_t)(uintptr_t)sndc));
+}
+
+static __inline int
+hn_nvs_send_sglist(struct vmbus_channel *chan, struct vmbus_gpa sg[], int sglen,
+ void *nvs_msg, int nvs_msglen, struct hn_send_ctx *sndc)
+{
+
+ return (vmbus_chan_send_sglist(chan, sg, sglen, nvs_msg, nvs_msglen,
+ (uint64_t)(uintptr_t)sndc));
+}
+
+void hn_nvs_sent_xact(struct hn_send_ctx *sndc,
+ struct netvsc_dev_ *net_dev, struct vmbus_channel *chan,
+ const struct nvsp_msg_ *msg, int dlen);
+void hn_chim_free(struct netvsc_dev_ *net_dev, uint32_t chim_idx);
+
+#endif /* !_IF_HNVAR_H_ */
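/*
 * A minimal usage sketch of the send-context API above.  Assumptions:
 * the caller owns 'chan' and a prepared 'rndis' message; 'done_cb' is
 * a hypothetical completion callback.  The context rides along as the
 * VMBUS transaction id and is handed back to 'done_cb' on completion.
 */
static void
done_cb(struct hn_send_ctx *sndc, struct netvsc_dev_ *net_dev,
    struct vmbus_channel *chan __unused, const struct nvsp_msg_ *msg __unused,
    int dlen __unused)
{
	if (sndc->hn_chim_idx != HN_NVS_CHIM_IDX_INVALID)
		hn_chim_free(net_dev, sndc->hn_chim_idx);
}

static int
send_sketch(struct vmbus_channel *chan, struct hn_nvs_rndis *rndis,
    struct hn_send_ctx *sndc)
{

	hn_send_ctx_init_simple(sndc, done_cb, NULL);
	return (hn_nvs_send(chan, VMBUS_CHANPKT_FLAG_RC,
	    rndis, sizeof(*rndis), sndc));
}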
diff --git a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
index 4a03f7719cfa..b946f87c2c8b 100644
--- a/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
+++ b/sys/dev/hyperv/storvsc/hv_storvsc_drv_freebsd.c
@@ -40,6 +40,7 @@ __FBSDID("$FreeBSD$");
#include <sys/condvar.h>
#include <sys/time.h>
#include <sys/systm.h>
+#include <sys/sysctl.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
@@ -52,6 +53,7 @@ __FBSDID("$FreeBSD$");
#include <sys/bus.h>
#include <sys/mutex.h>
#include <sys/callout.h>
+#include <sys/smp.h>
#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/uma.h>
@@ -86,7 +88,25 @@ __FBSDID("$FreeBSD$");
#define VSTOR_PKT_SIZE (sizeof(struct vstor_packet) - vmscsi_size_delta)
-#define HV_ALIGN(x, a) roundup2(x, a)
+/*
+ * 33 segments are needed to allow 128KB maxio, in case the data
+ * in the first page is _not_ PAGE_SIZE aligned, e.g.
+ *
+ * |<----------- 128KB ----------->|
+ * | |
+ * 0 2K 4K 8K 16K 124K 128K 130K
+ * | | | | | | | |
+ * +--+--+-----+-----+.......+-----+--+--+
+ * | | | | | | | | | DATA
+ * | | | | | | | | |
+ * +--+--+-----+-----+.......+-----+--+--+
+ * | | | |
+ * | 1| 31 | 1| ...... # of segments
+ */
+#define STORVSC_DATA_SEGCNT_MAX 33
+#define STORVSC_DATA_SEGSZ_MAX PAGE_SIZE
+#define STORVSC_DATA_SIZE_MAX \
+ ((STORVSC_DATA_SEGCNT_MAX - 1) * STORVSC_DATA_SEGSZ_MAX)
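/*
 * Worked check of the diagram above (an illustrative assertion, with
 * PAGE_SIZE assumed to be 4KB): a transfer of STORVSC_DATA_SIZE_MAX
 * bytes starting at the worst-case in-page offset spans exactly
 * STORVSC_DATA_SEGCNT_MAX pages: howmany(131072 + 4095, 4096) == 33.
 */
CTASSERT(howmany(STORVSC_DATA_SIZE_MAX + STORVSC_DATA_SEGSZ_MAX - 1,
    STORVSC_DATA_SEGSZ_MAX) == STORVSC_DATA_SEGCNT_MAX);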
struct storvsc_softc;
@@ -101,7 +121,7 @@ struct hv_sgl_page_pool{
boolean_t is_init;
} g_hv_sgl_page_pool;
-#define STORVSC_MAX_SG_PAGE_CNT STORVSC_MAX_IO_REQUESTS * VMBUS_CHAN_PRPLIST_MAX
+#define STORVSC_MAX_SG_PAGE_CNT (STORVSC_MAX_IO_REQUESTS * STORVSC_DATA_SEGCNT_MAX)
enum storvsc_request_type {
WRITE_TYPE,
@@ -109,26 +129,41 @@ enum storvsc_request_type {
UNKNOWN_TYPE
};
-struct hvs_gpa_range {
+SYSCTL_NODE(_hw, OID_AUTO, storvsc, CTLFLAG_RD | CTLFLAG_MPSAFE, NULL,
+ "Hyper-V storage interface");
+
+static u_int hv_storvsc_use_pim_unmapped = 1;
+SYSCTL_INT(_hw_storvsc, OID_AUTO, use_pim_unmapped, CTLFLAG_RDTUN,
+ &hv_storvsc_use_pim_unmapped, 0,
+ "Optimize storvsc by using unmapped I/O");
+
+struct hv_storvsc_sysctl {
+ u_long data_bio_cnt;
+ u_long data_vaddr_cnt;
+ u_long data_sg_cnt;
+};
+
+struct storvsc_gpa_range {
struct vmbus_gpa_range gpa_range;
- uint64_t gpa_page[VMBUS_CHAN_PRPLIST_MAX];
+ uint64_t gpa_page[STORVSC_DATA_SEGCNT_MAX];
} __packed;
struct hv_storvsc_request {
- LIST_ENTRY(hv_storvsc_request) link;
- struct vstor_packet vstor_packet;
- int prp_cnt;
- struct hvs_gpa_range prp_list;
- void *sense_data;
- uint8_t sense_info_len;
- uint8_t retries;
- union ccb *ccb;
- struct storvsc_softc *softc;
- struct callout callout;
- struct sema synch_sema; /*Synchronize the request/response if needed */
- struct sglist *bounce_sgl;
- unsigned int bounce_sgl_count;
- uint64_t not_aligned_seg_bits;
+ LIST_ENTRY(hv_storvsc_request) link;
+ struct vstor_packet vstor_packet;
+ int prp_cnt;
+ struct storvsc_gpa_range prp_list;
+ void *sense_data;
+ uint8_t sense_info_len;
+ uint8_t retries;
+ union ccb *ccb;
+ struct storvsc_softc *softc;
+ struct callout callout;
+ struct sema synch_sema; /* Synchronize the request/response if needed */
+ struct sglist *bounce_sgl;
+ unsigned int bounce_sgl_count;
+ uint64_t not_aligned_seg_bits;
+ bus_dmamap_t data_dmap;
};
struct storvsc_softc {
@@ -147,6 +182,8 @@ struct storvsc_softc {
struct hv_storvsc_request hs_init_req;
struct hv_storvsc_request hs_reset_req;
device_t hs_dev;
+ bus_dma_tag_t storvsc_req_dtag;
+ struct hv_storvsc_sysctl sysctl_data;
struct vmbus_channel *hs_cpu2chan[MAXCPU];
};
@@ -881,6 +918,77 @@ storvsc_create_cpu2chan(struct storvsc_softc *sc)
}
}
+static int
+storvsc_init_requests(device_t dev)
+{
+ struct storvsc_softc *sc = device_get_softc(dev);
+ struct hv_storvsc_request *reqp;
+ int error, i;
+
+ LIST_INIT(&sc->hs_free_list);
+
+ error = bus_dma_tag_create(
+ bus_get_dma_tag(dev), /* parent */
+ 1, /* alignment */
+ PAGE_SIZE, /* boundary */
+ BUS_SPACE_MAXADDR, /* lowaddr */
+ BUS_SPACE_MAXADDR, /* highaddr */
+ NULL, NULL, /* filter, filterarg */
+ STORVSC_DATA_SIZE_MAX, /* maxsize */
+ STORVSC_DATA_SEGCNT_MAX, /* nsegments */
+ STORVSC_DATA_SEGSZ_MAX, /* maxsegsize */
+ 0, /* flags */
+ NULL, /* lockfunc */
+ NULL, /* lockfuncarg */
+ &sc->storvsc_req_dtag);
+ if (error) {
+ device_printf(dev, "failed to create storvsc dma tag\n");
+ return (error);
+ }
+
+ for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
+ reqp = malloc(sizeof(struct hv_storvsc_request),
+ M_DEVBUF, M_WAITOK|M_ZERO);
+ reqp->softc = sc;
+ error = bus_dmamap_create(sc->storvsc_req_dtag, 0,
+ &reqp->data_dmap);
+ if (error) {
+ device_printf(dev, "failed to allocate storvsc "
+ "data dmamap\n");
+ goto cleanup;
+ }
+ LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
+ }
+ return (0);
+
+cleanup:
+ while ((reqp = LIST_FIRST(&sc->hs_free_list)) != NULL) {
+ LIST_REMOVE(reqp, link);
+ bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
+ free(reqp, M_DEVBUF);
+ }
+ return (error);
+}
+
+static void
+storvsc_sysctl(device_t dev)
+{
+ struct sysctl_oid_list *child;
+ struct sysctl_ctx_list *ctx;
+ struct storvsc_softc *sc;
+
+ sc = device_get_softc(dev);
+ ctx = device_get_sysctl_ctx(dev);
+ child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_bio_cnt", CTLFLAG_RW,
+ &sc->sysctl_data.data_bio_cnt, "# of bio data block");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_vaddr_cnt", CTLFLAG_RW,
+ &sc->sysctl_data.data_vaddr_cnt, "# of vaddr data block");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "data_sg_cnt", CTLFLAG_RW,
+ &sc->sysctl_data.data_sg_cnt, "# of sg data block");
+}
+
/**
* @brief StorVSC attach function
*
@@ -925,16 +1033,11 @@ storvsc_attach(device_t dev)
sc->hs_unit = device_get_unit(dev);
sc->hs_dev = dev;
- LIST_INIT(&sc->hs_free_list);
mtx_init(&sc->hs_lock, "hvslck", NULL, MTX_DEF);
- for (i = 0; i < sc->hs_drv_props->drv_max_ios_per_target; ++i) {
- reqp = malloc(sizeof(struct hv_storvsc_request),
- M_DEVBUF, M_WAITOK|M_ZERO);
- reqp->softc = sc;
-
- LIST_INSERT_HEAD(&sc->hs_free_list, reqp, link);
- }
+ ret = storvsc_init_requests(dev);
+ if (ret != 0)
+ goto cleanup;
/* create sg-list page pool */
if (FALSE == g_hv_sgl_page_pool.is_init) {
@@ -944,7 +1047,7 @@ storvsc_attach(device_t dev)
/*
* Pre-create SG list, each SG list with
- * VMBUS_CHAN_PRPLIST_MAX segments, each
+ * STORVSC_DATA_SEGCNT_MAX segments, each
* segment has one page buffer
*/
for (i = 0; i < STORVSC_MAX_IO_REQUESTS; i++) {
@@ -952,10 +1055,10 @@ storvsc_attach(device_t dev)
M_DEVBUF, M_WAITOK|M_ZERO);
sgl_node->sgl_data =
- sglist_alloc(VMBUS_CHAN_PRPLIST_MAX,
+ sglist_alloc(STORVSC_DATA_SEGCNT_MAX,
M_WAITOK|M_ZERO);
- for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) {
+ for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++) {
tmp_buff = malloc(PAGE_SIZE,
M_DEVBUF, M_WAITOK|M_ZERO);
@@ -1030,6 +1133,8 @@ storvsc_attach(device_t dev)
mtx_unlock(&sc->hs_lock);
+ storvsc_sysctl(dev);
+
root_mount_rel(root_mount_token);
return (0);
@@ -1039,13 +1144,14 @@ cleanup:
while (!LIST_EMPTY(&sc->hs_free_list)) {
reqp = LIST_FIRST(&sc->hs_free_list);
LIST_REMOVE(reqp, link);
+ bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
free(reqp, M_DEVBUF);
}
while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
LIST_REMOVE(sgl_node, link);
- for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++) {
+ for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++) {
if (NULL !=
(void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
@@ -1100,7 +1206,7 @@ storvsc_detach(device_t dev)
while (!LIST_EMPTY(&sc->hs_free_list)) {
reqp = LIST_FIRST(&sc->hs_free_list);
LIST_REMOVE(reqp, link);
-
+ bus_dmamap_destroy(sc->storvsc_req_dtag, reqp->data_dmap);
free(reqp, M_DEVBUF);
}
mtx_unlock(&sc->hs_lock);
@@ -1108,7 +1214,7 @@ storvsc_detach(device_t dev)
while (!LIST_EMPTY(&g_hv_sgl_page_pool.free_sgl_list)) {
sgl_node = LIST_FIRST(&g_hv_sgl_page_pool.free_sgl_list);
LIST_REMOVE(sgl_node, link);
- for (j = 0; j < VMBUS_CHAN_PRPLIST_MAX; j++){
+ for (j = 0; j < STORVSC_DATA_SEGCNT_MAX; j++) {
if (NULL !=
(void*)sgl_node->sgl_data->sg_segs[j].ss_paddr) {
free((void*)sgl_node->sgl_data->sg_segs[j].ss_paddr, M_DEVBUF);
@@ -1293,6 +1399,9 @@ storvsc_action(struct cam_sim *sim, union ccb *ccb)
cpi->hba_inquiry = PI_TAG_ABLE|PI_SDTR_ABLE;
cpi->target_sprt = 0;
cpi->hba_misc = PIM_NOBUSRESET;
+ if (hv_storvsc_use_pim_unmapped)
+ cpi->hba_misc |= PIM_UNMAPPED;
+ cpi->maxio = STORVSC_DATA_SIZE_MAX;
cpi->hba_eng_cnt = 0;
cpi->max_target = STORVSC_MAX_TARGETS;
cpi->max_lun = sc->hs_drv_props->drv_max_luns_per_target;
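
PIM_UNMAPPED advertises to CAM that this SIM accepts unmapped I/O (CAM_DATA_BIO), which is what makes the bus_dmamap_load_ccb() conversion below pay off. hv_storvsc_use_pim_unmapped itself is defined earlier in this file's diff, outside this hunk; a sketch of what such a knob typically looks like (sysctl node name assumed here):

	/* Hypothetical sketch; the real declaration is elsewhere in this diff. */
	static int hv_storvsc_use_pim_unmapped = 1;
	SYSCTL_INT(_hw_storvsc, OID_AUTO, use_pim_unmapped, CTLFLAG_RDTUN,
	    &hv_storvsc_use_pim_unmapped, 0,
	    "Advertise PIM_UNMAPPED (accept unmapped I/O) if non-zero");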
@@ -1367,6 +1476,7 @@ storvsc_action(struct cam_sim *sim, union ccb *ccb)
case XPT_SCSI_IO:
case XPT_IMMED_NOTIFY: {
struct hv_storvsc_request *reqp = NULL;
+ bus_dmamap_t dmap_saved;
if (ccb->csio.cdb_len == 0) {
panic("cdb_len is 0\n");
@@ -1385,7 +1495,14 @@ storvsc_action(struct cam_sim *sim, union ccb *ccb)
reqp = LIST_FIRST(&sc->hs_free_list);
LIST_REMOVE(reqp, link);
+ /* Save the data_dmap before reset request */
+ dmap_saved = reqp->data_dmap;
+
+ /* XXX this is ugly */
bzero(reqp, sizeof(struct hv_storvsc_request));
+
+ /* Restore necessary bits */
+ reqp->data_dmap = dmap_saved;
reqp->softc = sc;
ccb->ccb_h.status |= CAM_SIM_QUEUED;
@@ -1641,6 +1758,50 @@ storvsc_check_bounce_buffer_sgl(bus_dma_segment_t *sgl,
}
/**
+ * Copy bus_dma segments into the multi-page buffer.  The pages must
+ * be densely packed: every intermediate page is fully used; only the
+ * 1st and last pages may be partial.
+ */
+static void
+storvsc_xferbuf_prepare(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
+{
+ struct hv_storvsc_request *reqp = arg;
+ union ccb *ccb = reqp->ccb;
+ struct ccb_scsiio *csio = &ccb->csio;
+ struct storvsc_gpa_range *prplist;
+ int i;
+
+ prplist = &reqp->prp_list;
+ prplist->gpa_range.gpa_len = csio->dxfer_len;
+ prplist->gpa_range.gpa_ofs = segs[0].ds_addr & PAGE_MASK;
+
+ for (i = 0; i < nsegs; i++) {
+#ifdef INVARIANTS
+ if (nsegs > 1) {
+ if (i == 0) {
+ KASSERT((segs[i].ds_addr & PAGE_MASK) +
+ segs[i].ds_len == PAGE_SIZE,
+ ("invalid 1st page, ofs 0x%jx, len %zu",
+ (uintmax_t)segs[i].ds_addr,
+ segs[i].ds_len));
+ } else if (i == nsegs - 1) {
+ KASSERT((segs[i].ds_addr & PAGE_MASK) == 0,
+ ("invalid last page, ofs 0x%jx",
+ (uintmax_t)segs[i].ds_addr));
+ } else {
+ KASSERT((segs[i].ds_addr & PAGE_MASK) == 0 &&
+ segs[i].ds_len == PAGE_SIZE,
+ ("not a full page, ofs 0x%jx, len %zu",
+ (uintmax_t)segs[i].ds_addr,
+ segs[i].ds_len));
+ }
+ }
+#endif
+ prplist->gpa_page[i] = atop(segs[i].ds_addr);
+ }
+ reqp->prp_cnt = nsegs;
+}
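
A worked example of the layout those assertions enforce (all values illustrative, PAGE_SIZE = 4 KiB):

/*
 * dxfer_len = 0x1800 (6 KiB), first segment at physical 0x10000800:
 *
 *   seg[0]: addr 0x10000800, len 0x0800  -> runs to the end of its page
 *   seg[1]: addr 0x10001000, len 0x1000  -> one full, page-aligned page
 *   seg[2]: addr 0x10002000, len 0x0800  -> page-aligned start, partial
 *
 * gpa_ofs  = 0x800
 * gpa_len  = 0x1800
 * gpa_page = { 0x10000, 0x10001, 0x10002 }   (atop() of each address)
 */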
+
+/**
* @brief Fill in a request structure based on a CAM control block
*
* Fills in a request structure based on the contents of a CAM control
@@ -1655,11 +1816,9 @@ create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
{
struct ccb_scsiio *csio = &ccb->csio;
uint64_t phys_addr;
- uint32_t bytes_to_copy = 0;
- uint32_t pfn_num = 0;
uint32_t pfn;
uint64_t not_aligned_seg_bits = 0;
- struct hvs_gpa_range *prplist;
+ int error;
/* refer to struct vmscsi_req for meanings of these two fields */
reqp->vstor_packet.u.vm_srb.port =
@@ -1703,36 +1862,26 @@ create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
return (0);
}
- prplist = &reqp->prp_list;
- prplist->gpa_range.gpa_len = csio->dxfer_len;
-
switch (ccb->ccb_h.flags & CAM_DATA_MASK) {
+ case CAM_DATA_BIO:
case CAM_DATA_VADDR:
- {
- bytes_to_copy = csio->dxfer_len;
- phys_addr = vtophys(csio->data_ptr);
- prplist->gpa_range.gpa_ofs = phys_addr & PAGE_MASK;
-
- while (bytes_to_copy != 0) {
- int bytes, page_offset;
- phys_addr =
- vtophys(&csio->data_ptr[prplist->gpa_range.gpa_len -
- bytes_to_copy]);
- pfn = phys_addr >> PAGE_SHIFT;
- prplist->gpa_page[pfn_num] = pfn;
- page_offset = phys_addr & PAGE_MASK;
-
- bytes = min(PAGE_SIZE - page_offset, bytes_to_copy);
-
- bytes_to_copy -= bytes;
- pfn_num++;
+ error = bus_dmamap_load_ccb(reqp->softc->storvsc_req_dtag,
+ reqp->data_dmap, ccb, storvsc_xferbuf_prepare, reqp,
+ BUS_DMA_NOWAIT);
+ if (error) {
+ xpt_print(ccb->ccb_h.path,
+ "bus_dmamap_load_ccb failed: %d\n", error);
+ return (error);
}
- reqp->prp_cnt = pfn_num;
+ if ((ccb->ccb_h.flags & CAM_DATA_MASK) == CAM_DATA_BIO)
+ reqp->softc->sysctl_data.data_bio_cnt++;
+ else
+ reqp->softc->sysctl_data.data_vaddr_cnt++;
break;
- }
case CAM_DATA_SG:
{
+ struct storvsc_gpa_range *prplist;
int i = 0;
int offset = 0;
int ret;
@@ -1741,13 +1890,16 @@ create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
(bus_dma_segment_t *)ccb->csio.data_ptr;
u_int16_t storvsc_sg_count = ccb->csio.sglist_cnt;
+ prplist = &reqp->prp_list;
+ prplist->gpa_range.gpa_len = csio->dxfer_len;
+
printf("Storvsc: get SG I/O operation, %d\n",
reqp->vstor_packet.u.vm_srb.data_in);
- if (storvsc_sg_count > VMBUS_CHAN_PRPLIST_MAX){
+ if (storvsc_sg_count > STORVSC_DATA_SEGCNT_MAX) {
printf("Storvsc: too many segments (%d), "
"only %d segments are supported\n",
- storvsc_sg_count, VMBUS_CHAN_PRPLIST_MAX);
+ storvsc_sg_count, STORVSC_DATA_SEGCNT_MAX);
return (EINVAL);
}
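
Because the load above passes BUS_DMA_NOWAIT, it never defers with EINPROGRESS: storvsc_xferbuf_prepare() runs synchronously, so reqp->prp_list and prp_cnt are fully populated by the time bus_dmamap_load_ccb() returns 0. The completion side then presumably syncs and unloads the map before the request is recycled; a sketch of that step (assumed, not shown in this hunk):

	/* In the I/O done path, after the host reports completion: */
	bus_dmamap_sync(sc->storvsc_req_dtag, reqp->data_dmap,
	    (ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN ?
	    BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
	bus_dmamap_unload(sc->storvsc_req_dtag, reqp->data_dmap);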
@@ -1844,6 +1996,7 @@ create_storvsc_request(union ccb *ccb, struct hv_storvsc_request *reqp)
reqp->bounce_sgl_count = 0;
}
+ reqp->softc->sysctl_data.data_sg_cnt++;
break;
}
default:
@@ -1971,8 +2124,8 @@ storvsc_io_done(struct hv_storvsc_request *reqp)
* For more information about INQUIRY, please refer to:
* ftp://ftp.avc-pioneer.com/Mtfuji_7/Proposal/Jun09/INQUIRY.pdf
*/
- const struct scsi_inquiry_data *inq_data =
- (const struct scsi_inquiry_data *)csio->data_ptr;
+ struct scsi_inquiry_data *inq_data =
+ (struct scsi_inquiry_data *)csio->data_ptr;
uint8_t* resp_buf = (uint8_t*)csio->data_ptr;
/* Get the buffer length reported by host */
int resp_xfer_len = vm_srb->transfer_len;
@@ -2001,6 +2154,25 @@ storvsc_io_done(struct hv_storvsc_request *reqp)
mtx_unlock(&sc->hs_lock);
}
} else {
+ char vendor[16];
+ cam_strvis(vendor, inq_data->vendor, sizeof(inq_data->vendor),
+ sizeof(vendor));
+ /*
+ * XXX: upgrade SPC2 to SPC3 if the host is WIN8 or WIN2012 R2,
+ * in order to support the UNMAP feature.
+ */
+ if (!strncmp(vendor, "Msft", 4) &&
+ SID_ANSI_REV(inq_data) == SCSI_REV_SPC2 &&
+ (vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN8_1 ||
+ vmstor_proto_version == VMSTOR_PROTOCOL_VERSION_WIN8)) {
+ inq_data->version = SCSI_REV_SPC3;
+ if (bootverbose) {
+ mtx_lock(&sc->hs_lock);
+ xpt_print(ccb->ccb_h.path,
+ "storvsc upgrades SPC2 to SPC3\n");
+ mtx_unlock(&sc->hs_lock);
+ }
+ }
ccb->ccb_h.status |= CAM_REQ_CMP;
if (bootverbose) {
mtx_lock(&sc->hs_lock);
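
From the guest, the effective SCSI revision can be checked with camcontrol(8), e.g. for a da(4) disk:

	# camcontrol inquiry da0

A host covered by the quirk above should now report SPC-3 rather than SPC-2 in the inquiry data.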
diff --git a/sys/dev/hyperv/utilities/hv_heartbeat.c b/sys/dev/hyperv/utilities/hv_heartbeat.c
index 5c23f725b230..703380b6c3ce 100644
--- a/sys/dev/hyperv/utilities/hv_heartbeat.c
+++ b/sys/dev/hyperv/utilities/hv_heartbeat.c
@@ -33,6 +33,7 @@
#include <sys/module.h>
#include <sys/timetc.h>
#include <sys/syscallsubr.h>
+#include <sys/systm.h>
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus.h>
@@ -63,7 +64,7 @@ hv_heartbeat_cb(struct vmbus_channel *channel, void *context)
softc = (hv_util_sc*)context;
buf = softc->receive_buffer;
- recvlen = PAGE_SIZE;
+ recvlen = softc->ic_buflen;
ret = vmbus_chan_recv(channel, buf, &recvlen, &requestid);
KASSERT(ret != ENOBUFS, ("hvheartbeat recvbuf is not large enough"));
/* XXX check recvlen to make sure that it contains enough data */
@@ -74,8 +75,7 @@ hv_heartbeat_cb(struct vmbus_channel *channel, void *context)
&buf[sizeof(struct hv_vmbus_pipe_hdr)];
if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
- hv_negotiate_version(icmsghdrp, NULL, buf);
-
+ hv_negotiate_version(icmsghdrp, buf);
} else {
heartbeat_msg =
(struct hv_vmbus_heartbeat_msg_data *)
@@ -109,11 +109,7 @@ hv_heartbeat_probe(device_t dev)
static int
hv_heartbeat_attach(device_t dev)
{
- hv_util_sc *softc = (hv_util_sc*)device_get_softc(dev);
-
- softc->callback = hv_heartbeat_cb;
-
- return hv_util_attach(dev);
+ return hv_util_attach(dev, hv_heartbeat_cb);
}
static device_method_t heartbeat_methods[] = {
diff --git a/sys/dev/hyperv/utilities/hv_kvp.c b/sys/dev/hyperv/utilities/hv_kvp.c
index 6c5d0b179b71..d83601251575 100644
--- a/sys/dev/hyperv/utilities/hv_kvp.c
+++ b/sys/dev/hyperv/utilities/hv_kvp.c
@@ -629,7 +629,7 @@ hv_kvp_process_request(void *context, int pending)
kvp_buf = sc->util_sc.receive_buffer;
channel = vmbus_get_channel(sc->dev);
- recvlen = 2 * PAGE_SIZE;
+ recvlen = sc->util_sc.ic_buflen;
ret = vmbus_chan_recv(channel, kvp_buf, &recvlen, &requestid);
KASSERT(ret != ENOBUFS, ("hvkvp recvbuf is not large enough"));
/* XXX check recvlen to make sure that it contains enough data */
@@ -696,7 +696,7 @@ hv_kvp_process_request(void *context, int pending)
/*
* Try reading next buffer
*/
- recvlen = 2 * PAGE_SIZE;
+ recvlen = sc->util_sc.ic_buflen;
ret = vmbus_chan_recv(channel, kvp_buf, &recvlen, &requestid);
KASSERT(ret != ENOBUFS, ("hvkvp recvbuf is not large enough"));
/* XXX check recvlen to make sure that it contains enough data */
@@ -892,7 +892,6 @@ hv_kvp_attach(device_t dev)
hv_kvp_sc *sc = (hv_kvp_sc*)device_get_softc(dev);
- sc->util_sc.callback = hv_kvp_callback;
sc->dev = dev;
sema_init(&sc->dev_sema, 0, "hv_kvp device semaphore");
mtx_init(&sc->pending_mutex, "hv-kvp pending mutex",
@@ -920,7 +919,7 @@ hv_kvp_attach(device_t dev)
return (error);
sc->hv_kvp_dev->si_drv1 = sc;
- return hv_util_attach(dev);
+ return hv_util_attach(dev, hv_kvp_callback);
}
static int
diff --git a/sys/dev/hyperv/utilities/hv_shutdown.c b/sys/dev/hyperv/utilities/hv_shutdown.c
index 521b2dae174e..fab1a227e5d6 100644
--- a/sys/dev/hyperv/utilities/hv_shutdown.c
+++ b/sys/dev/hyperv/utilities/hv_shutdown.c
@@ -38,6 +38,7 @@
#include <sys/reboot.h>
#include <sys/timetc.h>
#include <sys/syscallsubr.h>
+#include <sys/systm.h>
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus.h>
@@ -67,7 +68,7 @@ hv_shutdown_cb(struct vmbus_channel *channel, void *context)
softc = (hv_util_sc*)context;
buf = softc->receive_buffer;
- recv_len = PAGE_SIZE;
+ recv_len = softc->ic_buflen;
ret = vmbus_chan_recv(channel, buf, &recv_len, &request_id);
KASSERT(ret != ENOBUFS, ("hvshutdown recvbuf is not large enough"));
/* XXX check recv_len to make sure that it contains enough data */
@@ -78,8 +79,7 @@ hv_shutdown_cb(struct vmbus_channel *channel, void *context)
&buf[sizeof(struct hv_vmbus_pipe_hdr)];
if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
- hv_negotiate_version(icmsghdrp, NULL, buf);
-
+ hv_negotiate_version(icmsghdrp, buf);
} else {
shutdown_msg =
(struct hv_vmbus_shutdown_msg_data *)
@@ -131,11 +131,7 @@ hv_shutdown_probe(device_t dev)
static int
hv_shutdown_attach(device_t dev)
{
- hv_util_sc *softc = (hv_util_sc*)device_get_softc(dev);
-
- softc->callback = hv_shutdown_cb;
-
- return hv_util_attach(dev);
+ return hv_util_attach(dev, hv_shutdown_cb);
}
static device_method_t shutdown_methods[] = {
diff --git a/sys/dev/hyperv/utilities/hv_timesync.c b/sys/dev/hyperv/utilities/hv_timesync.c
index b46e4c799e4c..1cd7bc2274f9 100644
--- a/sys/dev/hyperv/utilities/hv_timesync.c
+++ b/sys/dev/hyperv/utilities/hv_timesync.c
@@ -38,6 +38,8 @@
#include <sys/reboot.h>
#include <sys/timetc.h>
#include <sys/syscallsubr.h>
+#include <sys/systm.h>
+#include <sys/taskqueue.h>
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/vmbus.h>
@@ -143,7 +145,7 @@ hv_timesync_cb(struct vmbus_channel *channel, void *context)
softc = (hv_timesync_sc*)context;
time_buf = softc->util_sc.receive_buffer;
- recvlen = PAGE_SIZE;
+ recvlen = softc->util_sc.ic_buflen;
ret = vmbus_chan_recv(channel, time_buf, &recvlen, &requestId);
KASSERT(ret != ENOBUFS, ("hvtimesync recvbuf is not large enough"));
/* XXX check recvlen to make sure that it contains enough data */
@@ -153,7 +155,7 @@ hv_timesync_cb(struct vmbus_channel *channel, void *context)
sizeof(struct hv_vmbus_pipe_hdr)];
if (icmsghdrp->icmsgtype == HV_ICMSGTYPE_NEGOTIATE) {
- hv_negotiate_version(icmsghdrp, NULL, time_buf);
+ hv_negotiate_version(icmsghdrp, time_buf);
} else {
timedatap = (struct hv_ictimesync_data *) &time_buf[
sizeof(struct hv_vmbus_pipe_hdr) +
@@ -187,18 +189,16 @@ hv_timesync_attach(device_t dev)
{
hv_timesync_sc *softc = device_get_softc(dev);
- softc->util_sc.callback = hv_timesync_cb;
TASK_INIT(&softc->task, 1, hv_set_host_time, softc);
-
- return hv_util_attach(dev);
+ return hv_util_attach(dev, hv_timesync_cb);
}
static int
hv_timesync_detach(device_t dev)
{
hv_timesync_sc *softc = device_get_softc(dev);
- taskqueue_drain(taskqueue_thread, &softc->task);
+ taskqueue_drain(taskqueue_thread, &softc->task);
return hv_util_detach(dev);
}
diff --git a/sys/dev/hyperv/utilities/hv_util.c b/sys/dev/hyperv/utilities/hv_util.c
index e398faa75ae6..e7ecf32ca9fb 100644
--- a/sys/dev/hyperv/utilities/hv_util.c
+++ b/sys/dev/hyperv/utilities/hv_util.c
@@ -44,12 +44,13 @@
#include <dev/hyperv/utilities/hv_utilreg.h>
#include "hv_util.h"
+#define VMBUS_IC_BRSIZE (4 * PAGE_SIZE)
+
void
-hv_negotiate_version(
- struct hv_vmbus_icmsg_hdr* icmsghdrp,
- struct hv_vmbus_icmsg_negotiate* negop,
- uint8_t* buf)
+hv_negotiate_version(struct hv_vmbus_icmsg_hdr *icmsghdrp, uint8_t *buf)
{
+ struct hv_vmbus_icmsg_negotiate *negop;
+
icmsghdrp->icmsgsize = 0x10;
negop = (struct hv_vmbus_icmsg_negotiate *)&buf[
@@ -74,16 +75,15 @@ hv_negotiate_version(
}
int
-hv_util_attach(device_t dev)
+hv_util_attach(device_t dev, vmbus_chan_callback_t cb)
{
- struct hv_util_sc* softc;
- struct vmbus_channel *chan;
- int ret;
+ struct hv_util_sc *sc = device_get_softc(dev);
+ struct vmbus_channel *chan = vmbus_get_channel(dev);
+ int error;
- softc = device_get_softc(dev);
- softc->receive_buffer =
- malloc(4 * PAGE_SIZE, M_DEVBUF, M_WAITOK | M_ZERO);
- chan = vmbus_get_channel(dev);
+ sc->ic_buflen = VMBUS_IC_BRSIZE;
+ sc->receive_buffer = malloc(VMBUS_IC_BRSIZE, M_DEVBUF,
+ M_WAITOK | M_ZERO);
/*
* These services are not performance critical and do not need
@@ -94,17 +94,13 @@ hv_util_attach(device_t dev)
*/
vmbus_chan_set_readbatch(chan, false);
- ret = vmbus_chan_open(chan, 4 * PAGE_SIZE, 4 * PAGE_SIZE, NULL, 0,
- softc->callback, softc);
-
- if (ret)
- goto error0;
-
+ error = vmbus_chan_open(chan, VMBUS_IC_BRSIZE, VMBUS_IC_BRSIZE, NULL, 0,
+ cb, sc);
+ if (error) {
+ free(sc->receive_buffer, M_DEVBUF);
+ return (error);
+ }
return (0);
-
-error0:
- free(softc->receive_buffer, M_DEVBUF);
- return (ret);
}
int
diff --git a/sys/dev/hyperv/utilities/hv_util.h b/sys/dev/hyperv/utilities/hv_util.h
index 09202e79ca5e..8cc9fdaa7e62 100644
--- a/sys/dev/hyperv/utilities/hv_util.h
+++ b/sys/dev/hyperv/utilities/hv_util.h
@@ -31,23 +31,20 @@
#ifndef _HVUTIL_H_
#define _HVUTIL_H_
+#include <dev/hyperv/include/vmbus.h>
+
/**
* hv_util related structures
*
*/
typedef struct hv_util_sc {
- /*
- * function to process Hyper-V messages
- */
- void (*callback)(struct vmbus_channel *, void *);
uint8_t *receive_buffer;
+ int ic_buflen;
} hv_util_sc;
-void hv_negotiate_version(
- struct hv_vmbus_icmsg_hdr* icmsghdrp,
- struct hv_vmbus_icmsg_negotiate* negop,
- uint8_t* buf);
+void hv_negotiate_version(struct hv_vmbus_icmsg_hdr *icmsghdrp, uint8_t *buf);
-int hv_util_attach(device_t dev);
+int hv_util_attach(device_t dev, vmbus_chan_callback_t cb);
int hv_util_detach(device_t dev);
+
#endif
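
Every utility service now follows the same attach pattern — compare the heartbeat, kvp, shutdown, and timesync hunks above. A minimal sketch for a new consumer of this API (the hv_foo_* names are illustrative):

static void
hv_foo_cb(struct vmbus_channel *chan, void *xsc)
{
	hv_util_sc *sc = xsc;
	uint64_t rid;
	int recvlen = sc->ic_buflen;	/* no longer a hardcoded PAGE_SIZE multiple */

	if (vmbus_chan_recv(chan, sc->receive_buffer, &recvlen, &rid) != 0)
		return;
	/* ... dispatch on icmsgtype, negotiate versions, send reply ... */
}

static int
hv_foo_attach(device_t dev)
{
	/* The channel callback is passed in; no softc stashing needed. */
	return (hv_util_attach(dev, hv_foo_cb));
}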
diff --git a/sys/dev/hyperv/vmbus/hv_ring_buffer.c b/sys/dev/hyperv/vmbus/hv_ring_buffer.c
deleted file mode 100644
index e8dd836e9e3f..000000000000
--- a/sys/dev/hyperv/vmbus/hv_ring_buffer.c
+++ /dev/null
@@ -1,524 +0,0 @@
-/*-
- * Copyright (c) 2009-2012,2016 Microsoft Corp.
- * Copyright (c) 2012 NetApp Inc.
- * Copyright (c) 2012 Citrix Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/sysctl.h>
-
-#include "hv_vmbus_priv.h"
-#include <dev/hyperv/vmbus/vmbus_reg.h>
-
-/* Amount of space to write to */
-#define HV_BYTES_AVAIL_TO_WRITE(r, w, z) \
- ((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w))
-
-static uint32_t copy_to_ring_buffer(hv_vmbus_ring_buffer_info *ring_info,
- uint32_t start_write_offset, const uint8_t *src,
- uint32_t src_len);
-static uint32_t copy_from_ring_buffer(hv_vmbus_ring_buffer_info *ring_info,
- char *dest, uint32_t dest_len, uint32_t start_read_offset);
-
-static int
-vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS)
-{
- const hv_vmbus_ring_buffer_info *br = arg1;
- uint32_t rindex, windex, intr_mask, ravail, wavail;
- char state[256];
-
- rindex = br->ring_buffer->br_rindex;
- windex = br->ring_buffer->br_windex;
- intr_mask = br->ring_buffer->br_imask;
- wavail = HV_BYTES_AVAIL_TO_WRITE(rindex, windex, br->ring_data_size);
- ravail = br->ring_data_size - wavail;
-
- snprintf(state, sizeof(state),
- "rindex:%u windex:%u intr_mask:%u ravail:%u wavail:%u",
- rindex, windex, intr_mask, ravail, wavail);
- return sysctl_handle_string(oidp, state, sizeof(state), req);
-}
-
-/*
- * Binary bufring states.
- */
-static int
-vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS)
-{
-#define BR_STATE_RIDX 0
-#define BR_STATE_WIDX 1
-#define BR_STATE_IMSK 2
-#define BR_STATE_RSPC 3
-#define BR_STATE_WSPC 4
-#define BR_STATE_MAX 5
-
- const hv_vmbus_ring_buffer_info *br = arg1;
- uint32_t rindex, windex, wavail, state[BR_STATE_MAX];
-
- rindex = br->ring_buffer->br_rindex;
- windex = br->ring_buffer->br_windex;
- wavail = HV_BYTES_AVAIL_TO_WRITE(rindex, windex, br->ring_data_size);
-
- state[BR_STATE_RIDX] = rindex;
- state[BR_STATE_WIDX] = windex;
- state[BR_STATE_IMSK] = br->ring_buffer->br_imask;
- state[BR_STATE_WSPC] = wavail;
- state[BR_STATE_RSPC] = br->ring_data_size - wavail;
-
- return sysctl_handle_opaque(oidp, state, sizeof(state), req);
-}
-
-void
-vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx, struct sysctl_oid *br_tree,
- hv_vmbus_ring_buffer_info *br, const char *name)
-{
- struct sysctl_oid *tree;
- char desc[64];
-
- tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(br_tree), OID_AUTO,
- name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
- if (tree == NULL)
- return;
-
- snprintf(desc, sizeof(desc), "%s state", name);
- SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state",
- CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
- br, 0, vmbus_br_sysctl_state, "A", desc);
-
- snprintf(desc, sizeof(desc), "%s binary state", name);
- SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state_bin",
- CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
- br, 0, vmbus_br_sysctl_state_bin, "IU", desc);
-}
-
-/**
- * @brief Get number of bytes available to read and to write to
- * for the specified ring buffer
- */
-static __inline void
-get_ring_buffer_avail_bytes(hv_vmbus_ring_buffer_info *rbi, uint32_t *read,
- uint32_t *write)
-{
- uint32_t read_loc, write_loc;
-
- /*
- * Capture the read/write indices before they changed
- */
- read_loc = rbi->ring_buffer->br_rindex;
- write_loc = rbi->ring_buffer->br_windex;
-
- *write = HV_BYTES_AVAIL_TO_WRITE(read_loc, write_loc,
- rbi->ring_data_size);
- *read = rbi->ring_data_size - *write;
-}
-
-/**
- * @brief Get the next write location for the specified ring buffer
- */
-static __inline uint32_t
-get_next_write_location(hv_vmbus_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->br_windex;
-}
-
-/**
- * @brief Set the next write location for the specified ring buffer
- */
-static __inline void
-set_next_write_location(hv_vmbus_ring_buffer_info *ring_info,
- uint32_t next_write_location)
-{
- ring_info->ring_buffer->br_windex = next_write_location;
-}
-
-/**
- * @brief Get the next read location for the specified ring buffer
- */
-static __inline uint32_t
-get_next_read_location(hv_vmbus_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->br_rindex;
-}
-
-/**
- * @brief Get the next read location + offset for the specified ring buffer.
- * This allows the caller to skip.
- */
-static __inline uint32_t
-get_next_read_location_with_offset(hv_vmbus_ring_buffer_info *ring_info,
- uint32_t offset)
-{
- uint32_t next = ring_info->ring_buffer->br_rindex;
-
- next += offset;
- next %= ring_info->ring_data_size;
- return (next);
-}
-
-/**
- * @brief Set the next read location for the specified ring buffer
- */
-static __inline void
-set_next_read_location(hv_vmbus_ring_buffer_info *ring_info,
- uint32_t next_read_location)
-{
- ring_info->ring_buffer->br_rindex = next_read_location;
-}
-
-/**
- * @brief Get the start of the ring buffer
- */
-static __inline void *
-get_ring_buffer(hv_vmbus_ring_buffer_info *ring_info)
-{
- return ring_info->ring_buffer->br_data;
-}
-
-/**
- * @brief Get the size of the ring buffer.
- */
-static __inline uint32_t
-get_ring_buffer_size(hv_vmbus_ring_buffer_info *ring_info)
-{
- return ring_info->ring_data_size;
-}
-
-/**
- * Get the read and write indices as uint64_t of the specified ring buffer.
- */
-static __inline uint64_t
-get_ring_buffer_indices(hv_vmbus_ring_buffer_info *ring_info)
-{
- return ((uint64_t)ring_info->ring_buffer->br_windex) << 32;
-}
-
-void
-hv_ring_buffer_read_begin(hv_vmbus_ring_buffer_info *ring_info)
-{
- ring_info->ring_buffer->br_imask = 1;
- mb();
-}
-
-uint32_t
-hv_ring_buffer_read_end(hv_vmbus_ring_buffer_info *ring_info)
-{
- uint32_t read, write;
-
- ring_info->ring_buffer->br_imask = 0;
- mb();
-
- /*
- * Now check to see if the ring buffer is still empty.
- * If it is not, we raced and we need to process new
- * incoming messages.
- */
- get_ring_buffer_avail_bytes(ring_info, &read, &write);
- return (read);
-}
-
-/*
- * When we write to the ring buffer, check if the host needs to
- * be signaled. Here are the details of this protocol:
- *
- * 1. The host guarantees that while it is draining the
- * ring buffer, it will set the interrupt_mask to
- * indicate it does not need to be interrupted when
- * new data is placed.
- *
- * 2. The host guarantees that it will completely drain
- * the ring buffer before exiting the read loop. Further,
- * once the ring buffer is empty, it will clear the
- * interrupt_mask and re-check to see if new data has
- * arrived.
- */
-static boolean_t
-hv_ring_buffer_needsig_on_write(uint32_t old_write_location,
- hv_vmbus_ring_buffer_info *rbi)
-{
- mb();
- if (rbi->ring_buffer->br_imask)
- return (FALSE);
-
- /* Read memory barrier */
- rmb();
- /*
- * This is the only case we need to signal when the
- * ring transitions from being empty to non-empty.
- */
- if (old_write_location == rbi->ring_buffer->br_rindex)
- return (TRUE);
-
- return (FALSE);
-}
-
-/**
- * @brief Initialize the ring buffer.
- */
-int
-hv_vmbus_ring_buffer_init(hv_vmbus_ring_buffer_info *ring_info, void *buffer,
- uint32_t buffer_len)
-{
- memset(ring_info, 0, sizeof(hv_vmbus_ring_buffer_info));
-
- ring_info->ring_buffer = buffer;
- ring_info->ring_buffer->br_rindex = 0;
- ring_info->ring_buffer->br_windex = 0;
-
- ring_info->ring_data_size = buffer_len - sizeof(struct vmbus_bufring);
- mtx_init(&ring_info->ring_lock, "vmbus ring buffer", NULL, MTX_SPIN);
-
- return (0);
-}
-
-/**
- * @brief Cleanup the ring buffer.
- */
-void
-hv_ring_buffer_cleanup(hv_vmbus_ring_buffer_info *ring_info)
-{
- mtx_destroy(&ring_info->ring_lock);
-}
-
-/**
- * @brief Write to the ring buffer.
- */
-int
-hv_ring_buffer_write(hv_vmbus_ring_buffer_info *out_ring_info,
- const struct iovec iov[], uint32_t iovlen, boolean_t *need_sig)
-{
- int i = 0;
- uint32_t byte_avail_to_write;
- uint32_t byte_avail_to_read;
- uint32_t old_write_location;
- uint32_t total_bytes_to_write = 0;
- volatile uint32_t next_write_location;
- uint64_t prev_indices = 0;
-
- for (i = 0; i < iovlen; i++)
- total_bytes_to_write += iov[i].iov_len;
-
- total_bytes_to_write += sizeof(uint64_t);
-
- mtx_lock_spin(&out_ring_info->ring_lock);
-
- get_ring_buffer_avail_bytes(out_ring_info, &byte_avail_to_read,
- &byte_avail_to_write);
-
- /*
- * If there is only room for the packet, assume it is full.
- * Otherwise, the next time around, we think the ring buffer
- * is empty since the read index == write index
- */
- if (byte_avail_to_write <= total_bytes_to_write) {
- mtx_unlock_spin(&out_ring_info->ring_lock);
- return (EAGAIN);
- }
-
- /*
- * Write to the ring buffer
- */
- next_write_location = get_next_write_location(out_ring_info);
-
- old_write_location = next_write_location;
-
- for (i = 0; i < iovlen; i++) {
- next_write_location = copy_to_ring_buffer(out_ring_info,
- next_write_location, iov[i].iov_base, iov[i].iov_len);
- }
-
- /*
- * Set previous packet start
- */
- prev_indices = get_ring_buffer_indices(out_ring_info);
-
- next_write_location = copy_to_ring_buffer(out_ring_info,
- next_write_location, (char *)&prev_indices, sizeof(uint64_t));
-
- /*
- * Full memory barrier before updating the write index.
- */
- mb();
-
- /*
- * Now, update the write location
- */
- set_next_write_location(out_ring_info, next_write_location);
-
- mtx_unlock_spin(&out_ring_info->ring_lock);
-
- *need_sig = hv_ring_buffer_needsig_on_write(old_write_location,
- out_ring_info);
-
- return (0);
-}
-
-/**
- * @brief Read without advancing the read index.
- */
-int
-hv_ring_buffer_peek(hv_vmbus_ring_buffer_info *in_ring_info, void *buffer,
- uint32_t buffer_len)
-{
- uint32_t bytesAvailToWrite;
- uint32_t bytesAvailToRead;
- uint32_t nextReadLocation = 0;
-
- mtx_lock_spin(&in_ring_info->ring_lock);
-
- get_ring_buffer_avail_bytes(in_ring_info, &bytesAvailToRead,
- &bytesAvailToWrite);
-
- /*
- * Make sure there is something to read
- */
- if (bytesAvailToRead < buffer_len) {
- mtx_unlock_spin(&in_ring_info->ring_lock);
- return (EAGAIN);
- }
-
- /*
- * Convert to byte offset
- */
- nextReadLocation = get_next_read_location(in_ring_info);
-
- nextReadLocation = copy_from_ring_buffer(in_ring_info,
- (char *)buffer, buffer_len, nextReadLocation);
-
- mtx_unlock_spin(&in_ring_info->ring_lock);
-
- return (0);
-}
-
-/**
- * @brief Read and advance the read index.
- */
-int
-hv_ring_buffer_read(hv_vmbus_ring_buffer_info *in_ring_info, void *buffer,
- uint32_t buffer_len, uint32_t offset)
-{
- uint32_t bytes_avail_to_write;
- uint32_t bytes_avail_to_read;
- uint32_t next_read_location = 0;
- uint64_t prev_indices = 0;
-
- if (buffer_len <= 0)
- return (EINVAL);
-
- mtx_lock_spin(&in_ring_info->ring_lock);
-
- get_ring_buffer_avail_bytes(in_ring_info, &bytes_avail_to_read,
- &bytes_avail_to_write);
-
- /*
- * Make sure there is something to read
- */
- if (bytes_avail_to_read < buffer_len) {
- mtx_unlock_spin(&in_ring_info->ring_lock);
- return (EAGAIN);
- }
-
- next_read_location = get_next_read_location_with_offset(in_ring_info,
- offset);
-
- next_read_location = copy_from_ring_buffer(in_ring_info, (char *)buffer,
- buffer_len, next_read_location);
-
- next_read_location = copy_from_ring_buffer(in_ring_info,
- (char *)&prev_indices, sizeof(uint64_t), next_read_location);
-
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- wmb();
-
- /*
- * Update the read index
- */
- set_next_read_location(in_ring_info, next_read_location);
-
- mtx_unlock_spin(&in_ring_info->ring_lock);
-
- return (0);
-}
-
-/**
- * @brief Helper routine to copy from source to ring buffer.
- *
- * Assume there is enough room. Handles wrap-around in dest case only!
- */
-static uint32_t
-copy_to_ring_buffer(hv_vmbus_ring_buffer_info *ring_info,
- uint32_t start_write_offset, const uint8_t *src, uint32_t src_len)
-{
- char *ring_buffer = get_ring_buffer(ring_info);
- uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);
- uint32_t fragLen;
-
- if (src_len > ring_buffer_size - start_write_offset) {
- /* wrap-around detected! */
- fragLen = ring_buffer_size - start_write_offset;
- memcpy(ring_buffer + start_write_offset, src, fragLen);
- memcpy(ring_buffer, src + fragLen, src_len - fragLen);
- } else {
- memcpy(ring_buffer + start_write_offset, src, src_len);
- }
-
- start_write_offset += src_len;
- start_write_offset %= ring_buffer_size;
-
- return (start_write_offset);
-}
-
-/**
- * @brief Helper routine to copy to source from ring buffer.
- *
- * Assume there is enough room. Handles wrap-around in src case only!
- */
-static uint32_t
-copy_from_ring_buffer(hv_vmbus_ring_buffer_info *ring_info, char *dest,
- uint32_t dest_len, uint32_t start_read_offset)
-{
- uint32_t fragLen;
- char *ring_buffer = get_ring_buffer(ring_info);
- uint32_t ring_buffer_size = get_ring_buffer_size(ring_info);
-
- if (dest_len > ring_buffer_size - start_read_offset) {
- /* wrap-around detected at the src */
- fragLen = ring_buffer_size - start_read_offset;
- memcpy(dest, ring_buffer + start_read_offset, fragLen);
- memcpy(dest + fragLen, ring_buffer, dest_len - fragLen);
- } else {
- memcpy(dest, ring_buffer + start_read_offset, dest_len);
- }
-
- start_read_offset += dest_len;
- start_read_offset %= ring_buffer_size;
-
- return (start_read_offset);
-}
diff --git a/sys/dev/hyperv/vmbus/hv_vmbus_priv.h b/sys/dev/hyperv/vmbus/hv_vmbus_priv.h
deleted file mode 100644
index b57a96378dc7..000000000000
--- a/sys/dev/hyperv/vmbus/hv_vmbus_priv.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*-
- * Copyright (c) 2009-2012,2016 Microsoft Corp.
- * Copyright (c) 2012 NetApp Inc.
- * Copyright (c) 2012 Citrix Inc.
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice unmodified, this list of conditions, and the following
- * disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef __HYPERV_PRIV_H__
-#define __HYPERV_PRIV_H__
-
-#include <sys/param.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/sema.h>
-#include <sys/_iovec.h>
-
-#include <dev/hyperv/vmbus/vmbus_chanvar.h>
-
-struct vmbus_softc;
-
-/*
- * Private, VM Bus functions
- */
-struct sysctl_ctx_list;
-struct sysctl_oid;
-
-void vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx,
- struct sysctl_oid *br_tree, hv_vmbus_ring_buffer_info *br,
- const char *name);
-
-int hv_vmbus_ring_buffer_init(
- hv_vmbus_ring_buffer_info *ring_info,
- void *buffer,
- uint32_t buffer_len);
-
-void hv_ring_buffer_cleanup(
- hv_vmbus_ring_buffer_info *ring_info);
-
-int hv_ring_buffer_write(
- hv_vmbus_ring_buffer_info *ring_info,
- const struct iovec iov[],
- uint32_t iovlen,
- boolean_t *need_sig);
-
-int hv_ring_buffer_peek(
- hv_vmbus_ring_buffer_info *ring_info,
- void *buffer,
- uint32_t buffer_len);
-
-int hv_ring_buffer_read(
- hv_vmbus_ring_buffer_info *ring_info,
- void *buffer,
- uint32_t buffer_len,
- uint32_t offset);
-
-void hv_ring_buffer_read_begin(
- hv_vmbus_ring_buffer_info *ring_info);
-
-uint32_t hv_ring_buffer_read_end(
- hv_vmbus_ring_buffer_info *ring_info);
-
-#endif /* __HYPERV_PRIV_H__ */
diff --git a/sys/dev/hyperv/vmbus/hyperv.c b/sys/dev/hyperv/vmbus/hyperv.c
index 3040e533b23a..2df313b73507 100644
--- a/sys/dev/hyperv/vmbus/hyperv.c
+++ b/sys/dev/hyperv/vmbus/hyperv.c
@@ -34,21 +34,14 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/pcpu.h>
+#include <sys/systm.h>
#include <sys/timetc.h>
-#include <machine/bus.h>
-#include <machine/md_var.h>
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
+#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
-#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/hyperv_machdep.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
-#include <dev/hyperv/vmbus/vmbus_var.h>
#define HYPERV_FREEBSD_BUILD 0ULL
#define HYPERV_FREEBSD_VERSION ((uint64_t)__FreeBSD_version)
@@ -74,13 +67,15 @@ struct hypercall_ctx {
struct hyperv_dma hc_dma;
};
-static u_int hyperv_get_timecount(struct timecounter *tc);
+static u_int hyperv_get_timecount(struct timecounter *);
+static bool hyperv_identify(void);
+static void hypercall_memfree(void);
-u_int hyperv_features;
-u_int hyperv_recommends;
+u_int hyperv_features;
+u_int hyperv_recommends;
-static u_int hyperv_pm_features;
-static u_int hyperv_features3;
+static u_int hyperv_pm_features;
+static u_int hyperv_features3;
static struct timecounter hyperv_timecounter = {
.tc_get_timecount = hyperv_get_timecount,
diff --git a/sys/dev/hyperv/vmbus/hyperv_reg.h b/sys/dev/hyperv/vmbus/hyperv_reg.h
index 060137491c60..97969585e6aa 100644
--- a/sys/dev/hyperv/vmbus/hyperv_reg.h
+++ b/sys/dev/hyperv/vmbus/hyperv_reg.h
@@ -30,6 +30,7 @@
#define _HYPERV_REG_H_
#include <sys/param.h>
+#include <sys/systm.h>
/*
* Hyper-V Synthetic MSRs
diff --git a/sys/dev/hyperv/vmbus/vmbus.c b/sys/dev/hyperv/vmbus/vmbus.c
index 2d7f3a0da535..6753c9e9b524 100644
--- a/sys/dev/hyperv/vmbus/vmbus.c
+++ b/sys/dev/hyperv/vmbus/vmbus.c
@@ -38,59 +38,45 @@ __FBSDID("$FreeBSD$");
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
-#include <sys/proc.h>
+#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
-#include <sys/syslog.h>
#include <sys/systm.h>
-#include <sys/rtprio.h>
-#include <sys/interrupt.h>
-#include <sys/sx.h>
#include <sys/taskqueue.h>
-#include <sys/mutex.h>
-#include <sys/smp.h>
-
-#include <machine/resource.h>
-#include <sys/rman.h>
-#include <machine/stdarg.h>
#include <machine/intr_machdep.h>
-#include <machine/md_var.h>
-#include <machine/segments.h>
-#include <sys/pcpu.h>
-#include <x86/apicvar.h>
+#include <x86/include/apicvar.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
#include <dev/hyperv/include/hyperv.h>
-#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
+#include <dev/hyperv/include/vmbus_xact.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
+#include <dev/hyperv/vmbus/vmbus_chanvar.h>
-#include <contrib/dev/acpica/include/acpi.h>
#include "acpi_if.h"
#include "vmbus_if.h"
#define VMBUS_GPADL_START 0xe1e10
struct vmbus_msghc {
- struct hypercall_postmsg_in *mh_inprm;
+ struct vmbus_xact *mh_xact;
struct hypercall_postmsg_in mh_inprm_save;
- struct hyperv_dma mh_inprm_dma;
-
- struct vmbus_message *mh_resp;
- struct vmbus_message mh_resp0;
-};
-
-struct vmbus_msghc_ctx {
- struct vmbus_msghc *mhc_free;
- struct mtx mhc_free_lock;
- uint32_t mhc_flags;
-
- struct vmbus_msghc *mhc_active;
- struct mtx mhc_active_lock;
};
-#define VMBUS_MSGHC_CTXF_DESTROY 0x0001
+static int vmbus_probe(device_t);
+static int vmbus_attach(device_t);
+static int vmbus_detach(device_t);
+static int vmbus_read_ivar(device_t, device_t, int,
+ uintptr_t *);
+static int vmbus_child_pnpinfo_str(device_t, device_t,
+ char *, size_t);
+static uint32_t vmbus_get_version_method(device_t, device_t);
+static int vmbus_probe_guid_method(device_t, device_t,
+ const struct hyperv_guid *);
static int vmbus_init(struct vmbus_softc *);
static int vmbus_connect(struct vmbus_softc *, uint32_t);
@@ -104,19 +90,19 @@ static void vmbus_scan_done(struct vmbus_softc *,
const struct vmbus_message *);
static void vmbus_chanmsg_handle(struct vmbus_softc *,
const struct vmbus_message *);
-
+static void vmbus_msg_task(void *, int);
+static void vmbus_synic_setup(void *);
+static void vmbus_synic_teardown(void *);
static int vmbus_sysctl_version(SYSCTL_HANDLER_ARGS);
+static int vmbus_dma_alloc(struct vmbus_softc *);
+static void vmbus_dma_free(struct vmbus_softc *);
+static int vmbus_intr_setup(struct vmbus_softc *);
+static void vmbus_intr_teardown(struct vmbus_softc *);
+static int vmbus_doattach(struct vmbus_softc *);
+static void vmbus_event_proc_dummy(struct vmbus_softc *,
+ int);
-static struct vmbus_msghc_ctx *vmbus_msghc_ctx_create(bus_dma_tag_t);
-static void vmbus_msghc_ctx_destroy(
- struct vmbus_msghc_ctx *);
-static void vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *);
-static struct vmbus_msghc *vmbus_msghc_alloc(bus_dma_tag_t);
-static void vmbus_msghc_free(struct vmbus_msghc *);
-static struct vmbus_msghc *vmbus_msghc_get1(struct vmbus_msghc_ctx *,
- uint32_t);
-
-struct vmbus_softc *vmbus_sc;
+static struct vmbus_softc *vmbus_sc;
extern inthand_t IDTVEC(vmbus_isr);
@@ -133,83 +119,44 @@ vmbus_chanmsg_handlers[VMBUS_CHANMSG_TYPE_MAX] = {
VMBUS_CHANMSG_PROC_WAKEUP(CONNECT_RESP)
};
-static struct vmbus_msghc *
-vmbus_msghc_alloc(bus_dma_tag_t parent_dtag)
-{
- struct vmbus_msghc *mh;
-
- mh = malloc(sizeof(*mh), M_DEVBUF, M_WAITOK | M_ZERO);
-
- mh->mh_inprm = hyperv_dmamem_alloc(parent_dtag,
- HYPERCALL_PARAM_ALIGN, 0, HYPERCALL_POSTMSGIN_SIZE,
- &mh->mh_inprm_dma, BUS_DMA_WAITOK);
- if (mh->mh_inprm == NULL) {
- free(mh, M_DEVBUF);
- return NULL;
- }
- return mh;
-}
+static device_method_t vmbus_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, vmbus_probe),
+ DEVMETHOD(device_attach, vmbus_attach),
+ DEVMETHOD(device_detach, vmbus_detach),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
-static void
-vmbus_msghc_free(struct vmbus_msghc *mh)
-{
- hyperv_dmamem_free(&mh->mh_inprm_dma, mh->mh_inprm);
- free(mh, M_DEVBUF);
-}
+ /* Bus interface */
+ DEVMETHOD(bus_add_child, bus_generic_add_child),
+ DEVMETHOD(bus_print_child, bus_generic_print_child),
+ DEVMETHOD(bus_read_ivar, vmbus_read_ivar),
+ DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str),
-static void
-vmbus_msghc_ctx_free(struct vmbus_msghc_ctx *mhc)
-{
- KASSERT(mhc->mhc_active == NULL, ("still have active msg hypercall"));
- KASSERT(mhc->mhc_free == NULL, ("still have hypercall msg"));
+ /* Vmbus interface */
+ DEVMETHOD(vmbus_get_version, vmbus_get_version_method),
+ DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method),
- mtx_destroy(&mhc->mhc_free_lock);
- mtx_destroy(&mhc->mhc_active_lock);
- free(mhc, M_DEVBUF);
-}
+ DEVMETHOD_END
+};
-static struct vmbus_msghc_ctx *
-vmbus_msghc_ctx_create(bus_dma_tag_t parent_dtag)
-{
- struct vmbus_msghc_ctx *mhc;
+static driver_t vmbus_driver = {
+ "vmbus",
+ vmbus_methods,
+ sizeof(struct vmbus_softc)
+};
- mhc = malloc(sizeof(*mhc), M_DEVBUF, M_WAITOK | M_ZERO);
- mtx_init(&mhc->mhc_free_lock, "vmbus msghc free", NULL, MTX_DEF);
- mtx_init(&mhc->mhc_active_lock, "vmbus msghc act", NULL, MTX_DEF);
+static devclass_t vmbus_devclass;
- mhc->mhc_free = vmbus_msghc_alloc(parent_dtag);
- if (mhc->mhc_free == NULL) {
- vmbus_msghc_ctx_free(mhc);
- return NULL;
- }
- return mhc;
-}
+DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL);
+MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
+MODULE_VERSION(vmbus, 1);
-static struct vmbus_msghc *
-vmbus_msghc_get1(struct vmbus_msghc_ctx *mhc, uint32_t dtor_flag)
+static __inline struct vmbus_softc *
+vmbus_get_softc(void)
{
- struct vmbus_msghc *mh;
-
- mtx_lock(&mhc->mhc_free_lock);
-
- while ((mhc->mhc_flags & dtor_flag) == 0 && mhc->mhc_free == NULL) {
- mtx_sleep(&mhc->mhc_free, &mhc->mhc_free_lock, 0,
- "gmsghc", 0);
- }
- if (mhc->mhc_flags & dtor_flag) {
- /* Being destroyed */
- mh = NULL;
- } else {
- mh = mhc->mhc_free;
- KASSERT(mh != NULL, ("no free hypercall msg"));
- KASSERT(mh->mh_resp == NULL,
- ("hypercall msg has pending response"));
- mhc->mhc_free = NULL;
- }
-
- mtx_unlock(&mhc->mhc_free_lock);
-
- return mh;
+ return vmbus_sc;
}
void
@@ -220,7 +167,7 @@ vmbus_msghc_reset(struct vmbus_msghc *mh, size_t dsize)
if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
panic("invalid data size %zu", dsize);
- inprm = mh->mh_inprm;
+ inprm = vmbus_xact_req_data(mh->mh_xact);
memset(inprm, 0, HYPERCALL_POSTMSGIN_SIZE);
inprm->hc_connid = VMBUS_CONNID_MESSAGE;
inprm->hc_msgtype = HYPERV_MSGTYPE_CHANNEL;
@@ -231,63 +178,50 @@ struct vmbus_msghc *
vmbus_msghc_get(struct vmbus_softc *sc, size_t dsize)
{
struct vmbus_msghc *mh;
+ struct vmbus_xact *xact;
if (dsize > HYPERCALL_POSTMSGIN_DSIZE_MAX)
panic("invalid data size %zu", dsize);
- mh = vmbus_msghc_get1(sc->vmbus_msg_hc, VMBUS_MSGHC_CTXF_DESTROY);
- if (mh == NULL)
- return NULL;
+ xact = vmbus_xact_get(sc->vmbus_xc,
+ dsize + __offsetof(struct hypercall_postmsg_in, hc_data[0]));
+ if (xact == NULL)
+ return (NULL);
+
+ mh = vmbus_xact_priv(xact, sizeof(*mh));
+ mh->mh_xact = xact;
vmbus_msghc_reset(mh, dsize);
- return mh;
+ return (mh);
}
void
-vmbus_msghc_put(struct vmbus_softc *sc, struct vmbus_msghc *mh)
+vmbus_msghc_put(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
- struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
-
- KASSERT(mhc->mhc_active == NULL, ("msg hypercall is active"));
- mh->mh_resp = NULL;
- mtx_lock(&mhc->mhc_free_lock);
- KASSERT(mhc->mhc_free == NULL, ("has free hypercall msg"));
- mhc->mhc_free = mh;
- mtx_unlock(&mhc->mhc_free_lock);
- wakeup(&mhc->mhc_free);
+ vmbus_xact_put(mh->mh_xact);
}
void *
vmbus_msghc_dataptr(struct vmbus_msghc *mh)
{
- return mh->mh_inprm->hc_data;
-}
-
-static void
-vmbus_msghc_ctx_destroy(struct vmbus_msghc_ctx *mhc)
-{
- struct vmbus_msghc *mh;
-
- mtx_lock(&mhc->mhc_free_lock);
- mhc->mhc_flags |= VMBUS_MSGHC_CTXF_DESTROY;
- mtx_unlock(&mhc->mhc_free_lock);
- wakeup(&mhc->mhc_free);
-
- mh = vmbus_msghc_get1(mhc, 0);
- if (mh == NULL)
- panic("can't get msghc");
+ struct hypercall_postmsg_in *inprm;
- vmbus_msghc_free(mh);
- vmbus_msghc_ctx_free(mhc);
+ inprm = vmbus_xact_req_data(mh->mh_xact);
+ return (inprm->hc_data);
}
int
vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
{
sbintime_t time = SBT_1MS;
+ struct hypercall_postmsg_in *inprm;
+ bus_addr_t inprm_paddr;
int i;
+ inprm = vmbus_xact_req_data(mh->mh_xact);
+ inprm_paddr = vmbus_xact_req_paddr(mh->mh_xact);
+
/*
* Save the input parameter so that we could restore the input
* parameter if the Hypercall failed.
@@ -296,7 +230,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
* Is this really necessary?! i.e. Will the Hypercall ever
* overwrite the input parameter?
*/
- memcpy(&mh->mh_inprm_save, mh->mh_inprm, HYPERCALL_POSTMSGIN_SIZE);
+ memcpy(&mh->mh_inprm_save, inprm, HYPERCALL_POSTMSGIN_SIZE);
/*
* In order to cope with transient failures, e.g. insufficient
@@ -308,7 +242,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
for (i = 0; i < HC_RETRY_MAX; ++i) {
uint64_t status;
- status = hypercall_post_message(mh->mh_inprm_dma.hv_paddr);
+ status = hypercall_post_message(inprm_paddr);
if (status == HYPERCALL_STATUS_SUCCESS)
return 0;
@@ -317,8 +251,7 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
time *= 2;
/* Restore input parameter and try again */
- memcpy(mh->mh_inprm, &mh->mh_inprm_save,
- HYPERCALL_POSTMSGIN_SIZE);
+ memcpy(inprm, &mh->mh_inprm_save, HYPERCALL_POSTMSGIN_SIZE);
}
#undef HC_RETRY_MAX
@@ -327,62 +260,30 @@ vmbus_msghc_exec_noresult(struct vmbus_msghc *mh)
}
int
-vmbus_msghc_exec(struct vmbus_softc *sc, struct vmbus_msghc *mh)
+vmbus_msghc_exec(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
- struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
int error;
- KASSERT(mh->mh_resp == NULL, ("hypercall msg has pending response"));
-
- mtx_lock(&mhc->mhc_active_lock);
- KASSERT(mhc->mhc_active == NULL, ("pending active msg hypercall"));
- mhc->mhc_active = mh;
- mtx_unlock(&mhc->mhc_active_lock);
-
+ vmbus_xact_activate(mh->mh_xact);
error = vmbus_msghc_exec_noresult(mh);
- if (error) {
- mtx_lock(&mhc->mhc_active_lock);
- KASSERT(mhc->mhc_active == mh, ("msghc mismatch"));
- mhc->mhc_active = NULL;
- mtx_unlock(&mhc->mhc_active_lock);
- }
+ if (error)
+ vmbus_xact_deactivate(mh->mh_xact);
return error;
}
const struct vmbus_message *
-vmbus_msghc_wait_result(struct vmbus_softc *sc, struct vmbus_msghc *mh)
+vmbus_msghc_wait_result(struct vmbus_softc *sc __unused, struct vmbus_msghc *mh)
{
- struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
-
- mtx_lock(&mhc->mhc_active_lock);
-
- KASSERT(mhc->mhc_active == mh, ("msghc mismatch"));
- while (mh->mh_resp == NULL) {
- mtx_sleep(&mhc->mhc_active, &mhc->mhc_active_lock, 0,
- "wmsghc", 0);
- }
- mhc->mhc_active = NULL;
+ size_t resp_len;
- mtx_unlock(&mhc->mhc_active_lock);
-
- return mh->mh_resp;
+ return (vmbus_xact_wait(mh->mh_xact, &resp_len));
}
void
vmbus_msghc_wakeup(struct vmbus_softc *sc, const struct vmbus_message *msg)
{
- struct vmbus_msghc_ctx *mhc = sc->vmbus_msg_hc;
- struct vmbus_msghc *mh;
-
- mtx_lock(&mhc->mhc_active_lock);
- mh = mhc->mhc_active;
- KASSERT(mh != NULL, ("no pending msg hypercall"));
- memcpy(&mh->mh_resp0, msg, sizeof(mh->mh_resp0));
- mh->mh_resp = &mh->mh_resp0;
-
- mtx_unlock(&mhc->mhc_active_lock);
- wakeup(&mhc->mhc_active);
+ vmbus_xact_ctx_wakeup(sc->vmbus_xc, msg, sizeof(*msg));
}
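
Taken together, the xact-backed helpers give a message hypercall a simple lifecycle; a rough sketch using only the functions reworked above (error handling mostly elided, `sc' and `dsize' assumed in scope):

	struct vmbus_msghc *mh;
	const struct vmbus_message *resp;
	void *data;
	int error;

	mh = vmbus_msghc_get(sc, dsize);	/* backed by a vmbus_xact */
	data = vmbus_msghc_dataptr(mh);
	/* ... fill `data' with the channel message ... */
	error = vmbus_msghc_exec(sc, mh);	/* activate xact, post message */
	if (!error)
		resp = vmbus_msghc_wait_result(sc, mh);
	vmbus_msghc_put(sc, mh);		/* return the xact to the ctx */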
uint32_t
@@ -1138,9 +1039,10 @@ vmbus_doattach(struct vmbus_softc *sc)
/*
* Create context for "post message" Hypercalls
*/
- sc->vmbus_msg_hc = vmbus_msghc_ctx_create(
- bus_get_dma_tag(sc->vmbus_dev));
- if (sc->vmbus_msg_hc == NULL) {
+ sc->vmbus_xc = vmbus_xact_ctx_create(bus_get_dma_tag(sc->vmbus_dev),
+ HYPERCALL_POSTMSGIN_SIZE, VMBUS_MSG_SIZE,
+ sizeof(struct vmbus_msghc));
+ if (sc->vmbus_xc == NULL) {
ret = ENXIO;
goto cleanup;
}
@@ -1195,9 +1097,9 @@ vmbus_doattach(struct vmbus_softc *sc)
cleanup:
vmbus_intr_teardown(sc);
vmbus_dma_free(sc);
- if (sc->vmbus_msg_hc != NULL) {
- vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc);
- sc->vmbus_msg_hc = NULL;
+ if (sc->vmbus_xc != NULL) {
+ vmbus_xact_ctx_destroy(sc->vmbus_xc);
+ sc->vmbus_xc = NULL;
}
free(sc->vmbus_chmap, M_DEVBUF);
mtx_destroy(&sc->vmbus_scan_lock);
@@ -1239,26 +1141,6 @@ vmbus_attach(device_t dev)
return (0);
}
-static void
-vmbus_sysinit(void *arg __unused)
-{
- struct vmbus_softc *sc = vmbus_get_softc();
-
- if (vm_guest != VM_GUEST_HV || sc == NULL)
- return;
-
-#ifndef EARLY_AP_STARTUP
- /*
- * If the system has already booted and thread
- * scheduling is possible, as indicated by the
- * global cold set to zero, we just call the driver
- * initialization directly.
- */
- if (!cold)
-#endif
- vmbus_doattach(sc);
-}
-
static int
vmbus_detach(device_t dev)
{
@@ -1276,9 +1158,9 @@ vmbus_detach(device_t dev)
vmbus_intr_teardown(sc);
vmbus_dma_free(sc);
- if (sc->vmbus_msg_hc != NULL) {
- vmbus_msghc_ctx_destroy(sc->vmbus_msg_hc);
- sc->vmbus_msg_hc = NULL;
+ if (sc->vmbus_xc != NULL) {
+ vmbus_xact_ctx_destroy(sc->vmbus_xc);
+ sc->vmbus_xc = NULL;
}
free(sc->vmbus_chmap, M_DEVBUF);
@@ -1288,45 +1170,30 @@ vmbus_detach(device_t dev)
return (0);
}
-static device_method_t vmbus_methods[] = {
- /* Device interface */
- DEVMETHOD(device_probe, vmbus_probe),
- DEVMETHOD(device_attach, vmbus_attach),
- DEVMETHOD(device_detach, vmbus_detach),
- DEVMETHOD(device_shutdown, bus_generic_shutdown),
- DEVMETHOD(device_suspend, bus_generic_suspend),
- DEVMETHOD(device_resume, bus_generic_resume),
-
- /* Bus interface */
- DEVMETHOD(bus_add_child, bus_generic_add_child),
- DEVMETHOD(bus_print_child, bus_generic_print_child),
- DEVMETHOD(bus_read_ivar, vmbus_read_ivar),
- DEVMETHOD(bus_child_pnpinfo_str, vmbus_child_pnpinfo_str),
-
- /* Vmbus interface */
- DEVMETHOD(vmbus_get_version, vmbus_get_version_method),
- DEVMETHOD(vmbus_probe_guid, vmbus_probe_guid_method),
-
- DEVMETHOD_END
-};
-
-static driver_t vmbus_driver = {
- "vmbus",
- vmbus_methods,
- sizeof(struct vmbus_softc)
-};
+#ifndef EARLY_AP_STARTUP
-static devclass_t vmbus_devclass;
+static void
+vmbus_sysinit(void *arg __unused)
+{
+ struct vmbus_softc *sc = vmbus_get_softc();
-DRIVER_MODULE(vmbus, acpi, vmbus_driver, vmbus_devclass, NULL, NULL);
-MODULE_DEPEND(vmbus, acpi, 1, 1, 1);
-MODULE_VERSION(vmbus, 1);
+ if (vm_guest != VM_GUEST_HV || sc == NULL)
+ return;
-#ifndef EARLY_AP_STARTUP
+ /*
+ * If the system has already booted and thread
+ * scheduling is possible, as indicated by the
+ * global cold set to zero, we just call the driver
+ * initialization directly.
+ */
+ if (!cold)
+ vmbus_doattach(sc);
+}
/*
* NOTE:
* We have to start as the last step of SI_SUB_SMP, i.e. after SMP is
* initialized.
*/
SYSINIT(vmbus_initialize, SI_SUB_SMP, SI_ORDER_ANY, vmbus_sysinit, NULL);
-#endif
+
+#endif /* !EARLY_AP_STARTUP */
diff --git a/sys/dev/hyperv/vmbus/vmbus_br.c b/sys/dev/hyperv/vmbus/vmbus_br.c
new file mode 100644
index 000000000000..99f4f8826b17
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/vmbus_br.c
@@ -0,0 +1,404 @@
+/*-
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/sysctl.h>
+
+#include <dev/hyperv/vmbus/vmbus_reg.h>
+#include <dev/hyperv/vmbus/vmbus_brvar.h>
+
+/* Amount of space available for write */
+#define VMBUS_BR_WAVAIL(r, w, z) \
+ (((w) >= (r)) ? ((z) - ((w) - (r))) : ((r) - (w)))
+
+/* Advance a bufring index, wrapping at the ring size */
+#define VMBUS_BR_IDXINC(idx, inc, sz) (((idx) + (inc)) % (sz))
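
A quick numeric check of VMBUS_BR_WAVAIL, assuming vbr_dsize = 4096:

/*
 *   rindex = 100, windex = 300  ->  4096 - (300 - 100) = 3896
 *   rindex = 300, windex = 100  ->  300 - 100          = 200
 *
 * windex == rindex means "empty", so a writer must always leave at
 * least one byte unused (cf. the `<= total' check in
 * vmbus_txbr_write() below).
 */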
+
+static int vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS);
+static int vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS);
+static void vmbus_br_setup(struct vmbus_br *, void *, int);
+
+static int
+vmbus_br_sysctl_state(SYSCTL_HANDLER_ARGS)
+{
+ const struct vmbus_br *br = arg1;
+ uint32_t rindex, windex, imask, ravail, wavail;
+ char state[256];
+
+ rindex = br->vbr_rindex;
+ windex = br->vbr_windex;
+ imask = br->vbr_imask;
+ wavail = VMBUS_BR_WAVAIL(rindex, windex, br->vbr_dsize);
+ ravail = br->vbr_dsize - wavail;
+
+ snprintf(state, sizeof(state),
+ "rindex:%u windex:%u imask:%u ravail:%u wavail:%u",
+ rindex, windex, imask, ravail, wavail);
+ return sysctl_handle_string(oidp, state, sizeof(state), req);
+}
+
+/*
+ * Binary bufring states.
+ */
+static int
+vmbus_br_sysctl_state_bin(SYSCTL_HANDLER_ARGS)
+{
+#define BR_STATE_RIDX 0
+#define BR_STATE_WIDX 1
+#define BR_STATE_IMSK 2
+#define BR_STATE_RSPC 3
+#define BR_STATE_WSPC 4
+#define BR_STATE_MAX 5
+
+ const struct vmbus_br *br = arg1;
+ uint32_t rindex, windex, wavail, state[BR_STATE_MAX];
+
+ rindex = br->vbr_rindex;
+ windex = br->vbr_windex;
+ wavail = VMBUS_BR_WAVAIL(rindex, windex, br->vbr_dsize);
+
+ state[BR_STATE_RIDX] = rindex;
+ state[BR_STATE_WIDX] = windex;
+ state[BR_STATE_IMSK] = br->vbr_imask;
+ state[BR_STATE_WSPC] = wavail;
+ state[BR_STATE_RSPC] = br->vbr_dsize - wavail;
+
+ return sysctl_handle_opaque(oidp, state, sizeof(state), req);
+}
+
+void
+vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx, struct sysctl_oid *br_tree,
+ struct vmbus_br *br, const char *name)
+{
+ struct sysctl_oid *tree;
+ char desc[64];
+
+ tree = SYSCTL_ADD_NODE(ctx, SYSCTL_CHILDREN(br_tree), OID_AUTO,
+ name, CTLFLAG_RD | CTLFLAG_MPSAFE, 0, "");
+ if (tree == NULL)
+ return;
+
+ snprintf(desc, sizeof(desc), "%s state", name);
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state",
+ CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ br, 0, vmbus_br_sysctl_state, "A", desc);
+
+ snprintf(desc, sizeof(desc), "%s binary state", name);
+ SYSCTL_ADD_PROC(ctx, SYSCTL_CHILDREN(tree), OID_AUTO, "state_bin",
+ CTLTYPE_OPAQUE | CTLFLAG_RD | CTLFLAG_MPSAFE,
+ br, 0, vmbus_br_sysctl_state_bin, "IU", desc);
+}
+
+void
+vmbus_rxbr_intr_mask(struct vmbus_rxbr *rbr)
+{
+ rbr->rxbr_imask = 1;
+ mb();
+}
+
+static __inline uint32_t
+vmbus_rxbr_avail(const struct vmbus_rxbr *rbr)
+{
+ uint32_t rindex, windex;
+
+ /* Get snapshot */
+ rindex = rbr->rxbr_rindex;
+ windex = rbr->rxbr_windex;
+
+ return (rbr->rxbr_dsize -
+ VMBUS_BR_WAVAIL(rindex, windex, rbr->rxbr_dsize));
+}
+
+uint32_t
+vmbus_rxbr_intr_unmask(struct vmbus_rxbr *rbr)
+{
+ rbr->rxbr_imask = 0;
+ mb();
+
+ /*
+ * Now check to see if the ring buffer is still empty.
+ * If it is not, we raced and we need to process new
+ * incoming channel packets.
+ */
+ return vmbus_rxbr_avail(rbr);
+}
+
+static void
+vmbus_br_setup(struct vmbus_br *br, void *buf, int blen)
+{
+ br->vbr = buf;
+ br->vbr_dsize = blen - sizeof(struct vmbus_bufring);
+}
+
+void
+vmbus_rxbr_init(struct vmbus_rxbr *rbr)
+{
+ mtx_init(&rbr->rxbr_lock, "vmbus_rxbr", NULL, MTX_SPIN);
+}
+
+void
+vmbus_rxbr_deinit(struct vmbus_rxbr *rbr)
+{
+ mtx_destroy(&rbr->rxbr_lock);
+}
+
+void
+vmbus_rxbr_setup(struct vmbus_rxbr *rbr, void *buf, int blen)
+{
+ vmbus_br_setup(&rbr->rxbr, buf, blen);
+}
+
+void
+vmbus_txbr_init(struct vmbus_txbr *tbr)
+{
+ mtx_init(&tbr->txbr_lock, "vmbus_txbr", NULL, MTX_SPIN);
+}
+
+void
+vmbus_txbr_deinit(struct vmbus_txbr *tbr)
+{
+ mtx_destroy(&tbr->txbr_lock);
+}
+
+void
+vmbus_txbr_setup(struct vmbus_txbr *tbr, void *buf, int blen)
+{
+ vmbus_br_setup(&tbr->txbr, buf, blen);
+}
+
+/*
+ * When we write to the ring buffer, check if the host needs to be
+ * signaled.
+ *
+ * The contract:
+ * - The host guarantees that while it is draining the TX bufring,
+ * it will set the br_imask to indicate it does not need to be
+ * interrupted when new data are added.
+ * - The host guarantees that it will completely drain the TX bufring
+ * before exiting the read loop. Further, once the TX bufring is
+ * empty, it will clear the br_imask and re-check to see if new
+ * data have arrived.
+ */
+static __inline boolean_t
+vmbus_txbr_need_signal(const struct vmbus_txbr *tbr, uint32_t old_windex)
+{
+ mb();
+ if (tbr->txbr_imask)
+ return (FALSE);
+
+ __compiler_membar();
+
+ /*
+ * We need to signal only when the ring transitions
+ * from empty to non-empty, i.e. when the saved write
+ * index equals the current read index.
+ */
+ if (old_windex == tbr->txbr_rindex)
+ return (TRUE);
+
+ return (FALSE);
+}
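+
+/*
+ * Illustration (not part of the contract above): suppose rindex ==
+ * old_windex == 100 when a write begins, i.e. the ring was empty.
+ * After the write advances windex, the ring is non-empty and the host
+ * must be signaled -- unless it already set txbr_imask, in which case
+ * it is still draining and will re-check for new data on its own.
+ */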
+
+static __inline uint32_t
+vmbus_txbr_avail(const struct vmbus_txbr *tbr)
+{
+ uint32_t rindex, windex;
+
+ /* Get snapshot */
+ rindex = tbr->txbr_rindex;
+ windex = tbr->txbr_windex;
+
+ return VMBUS_BR_WAVAIL(rindex, windex, tbr->txbr_dsize);
+}
+
+static __inline uint32_t
+vmbus_txbr_copyto(const struct vmbus_txbr *tbr, uint32_t windex,
+ const void *src0, uint32_t cplen)
+{
+ const uint8_t *src = src0;
+ uint8_t *br_data = tbr->txbr_data;
+ uint32_t br_dsize = tbr->txbr_dsize;
+
+ if (cplen > br_dsize - windex) {
+ uint32_t fraglen = br_dsize - windex;
+
+ /* Wrap-around detected */
+ memcpy(br_data + windex, src, fraglen);
+ memcpy(br_data, src + fraglen, cplen - fraglen);
+ } else {
+ memcpy(br_data + windex, src, cplen);
+ }
+ return VMBUS_BR_IDXINC(windex, cplen, br_dsize);
+}
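+
+/*
+ * For reference, the index-increment helper used above is assumed to
+ * wrap modulo the data size (a sketch only; the authoritative
+ * VMBUS_BR_IDXINC macro lives in the bufring headers):
+ *
+ * #define VMBUS_BR_IDXINC(idx, inc, sz) (((idx) + (inc)) % (sz))
+ */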
+
+/*
+ * Write scattered channel packet to TX bufring.
+ *
+ * The offset of this channel packet is written as a 64-bit value
+ * immediately after the packet data.
+ */
+int
+vmbus_txbr_write(struct vmbus_txbr *tbr, const struct iovec iov[], int iovlen,
+ boolean_t *need_sig)
+{
+ uint32_t old_windex, windex, total;
+ uint64_t save_windex;
+ int i;
+
+ total = 0;
+ for (i = 0; i < iovlen; i++)
+ total += iov[i].iov_len;
+ total += sizeof(save_windex);
+
+ mtx_lock_spin(&tbr->txbr_lock);
+
+ /*
+ * NOTE:
+ * If this write would make br_windex equal to br_rindex, i.e.
+ * the available write space exactly matches the write size, the
+ * write must be rejected: br_windex == br_rindex means that the
+ * bufring is empty.
+ */
+ if (vmbus_txbr_avail(tbr) <= total) {
+ mtx_unlock_spin(&tbr->txbr_lock);
+ return (EAGAIN);
+ }
+
+ /* Save br_windex for later use */
+ old_windex = tbr->txbr_windex;
+
+ /*
+ * Copy the scattered channel packet to the TX bufring.
+ */
+ windex = old_windex;
+ for (i = 0; i < iovlen; i++) {
+ windex = vmbus_txbr_copyto(tbr, windex,
+ iov[i].iov_base, iov[i].iov_len);
+ }
+
+ /*
+ * Set the offset of the current channel packet.
+ */
+ save_windex = ((uint64_t)old_windex) << 32;
+ windex = vmbus_txbr_copyto(tbr, windex, &save_windex,
+ sizeof(save_windex));
+
+ /*
+ * Update the write index _after_ the channel packet
+ * is copied.
+ */
+ __compiler_membar();
+ tbr->txbr_windex = windex;
+
+ mtx_unlock_spin(&tbr->txbr_lock);
+
+ *need_sig = vmbus_txbr_need_signal(tbr, old_windex);
+
+ return (0);
+}
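+
+#if 0
+/*
+ * Usage sketch (hypothetical caller, modeled on vmbus_chan_send()):
+ * gather header and payload into an iovec array, write them in one
+ * shot, and signal the host only if the write made the ring
+ * transition from empty to non-empty.
+ */
+static int
+example_txbr_send(struct vmbus_txbr *tbr, void *hdr, int hlen,
+ void *data, int dlen)
+{
+ struct iovec iov[2];
+ boolean_t send_evt = FALSE;
+ int error;
+
+ iov[0].iov_base = hdr;
+ iov[0].iov_len = hlen;
+ iov[1].iov_base = data;
+ iov[1].iov_len = dlen;
+
+ error = vmbus_txbr_write(tbr, iov, 2, &send_evt);
+ if (!error && send_evt) {
+ /* e.g. vmbus_chan_signal_tx(chan) at the channel layer */
+ }
+ return (error);
+}
+#endif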
+
+static __inline uint32_t
+vmbus_rxbr_copyfrom(const struct vmbus_rxbr *rbr, uint32_t rindex,
+ void *dst0, int cplen)
+{
+ uint8_t *dst = dst0;
+ const uint8_t *br_data = rbr->rxbr_data;
+ uint32_t br_dsize = rbr->rxbr_dsize;
+
+ if (cplen > br_dsize - rindex) {
+ uint32_t fraglen = br_dsize - rindex;
+
+ /* Wrap-around detected. */
+ memcpy(dst, br_data + rindex, fraglen);
+ memcpy(dst + fraglen, br_data, cplen - fraglen);
+ } else {
+ memcpy(dst, br_data + rindex, cplen);
+ }
+ return VMBUS_BR_IDXINC(rindex, cplen, br_dsize);
+}
+
+int
+vmbus_rxbr_peek(struct vmbus_rxbr *rbr, void *data, int dlen)
+{
+ mtx_lock_spin(&rbr->rxbr_lock);
+
+ /*
+ * At least the requested data and the 64-bit channel
+ * packet offset must be available.
+ */
+ if (vmbus_rxbr_avail(rbr) < dlen + sizeof(uint64_t)) {
+ mtx_unlock_spin(&rbr->rxbr_lock);
+ return (EAGAIN);
+ }
+ vmbus_rxbr_copyfrom(rbr, rbr->rxbr_rindex, data, dlen);
+
+ mtx_unlock_spin(&rbr->rxbr_lock);
+
+ return (0);
+}
+
+/*
+ * NOTE:
+ * We assume (dlen + skip) == sizeof(channel packet).
+ */
+int
+vmbus_rxbr_read(struct vmbus_rxbr *rbr, void *data, int dlen, uint32_t skip)
+{
+ uint32_t rindex, br_dsize = rbr->rxbr_dsize;
+
+ KASSERT(dlen + skip > 0, ("invalid dlen %d, offset %u", dlen, skip));
+
+ mtx_lock_spin(&rbr->rxbr_lock);
+
+ if (vmbus_rxbr_avail(rbr) < dlen + skip + sizeof(uint64_t)) {
+ mtx_unlock_spin(&rbr->rxbr_lock);
+ return (EAGAIN);
+ }
+
+ /*
+ * Copy channel packet from RX bufring.
+ */
+ rindex = VMBUS_BR_IDXINC(rbr->rxbr_rindex, skip, br_dsize);
+ rindex = vmbus_rxbr_copyfrom(rbr, rindex, data, dlen);
+
+ /*
+ * Discard this channel packet's 64-bit offset, which is useless to us.
+ */
+ rindex = VMBUS_BR_IDXINC(rindex, sizeof(uint64_t), br_dsize);
+
+ /*
+ * Update the read index _after_ the channel packet is fetched.
+ */
+ __compiler_membar();
+ rbr->rxbr_rindex = rindex;
+
+ mtx_unlock_spin(&rbr->rxbr_lock);
+
+ return (0);
+}
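+
+#if 0
+/*
+ * Usage sketch (hypothetical caller, modeled on vmbus_chan_recv()):
+ * peek at the channel packet header first, then consume the payload
+ * while skipping the header bytes; (dlen + skip) must equal the total
+ * channel packet size.  The VMBUS_CHANPKT_GETLEN accessor is assumed
+ * from the channel packet definitions.
+ */
+static int
+example_rxbr_recv(struct vmbus_rxbr *rbr, void *buf, int buflen)
+{
+ struct vmbus_chanpkt_hdr pkt;
+ int error, dlen, hlen;
+
+ error = vmbus_rxbr_peek(rbr, &pkt, sizeof(pkt));
+ if (error)
+ return (error);
+
+ hlen = VMBUS_CHANPKT_GETLEN(pkt.cph_hlen);
+ dlen = VMBUS_CHANPKT_GETLEN(pkt.cph_tlen) - hlen;
+ if (dlen > buflen)
+ return (ENOBUFS);
+
+ /* Skip the packet header; read only the payload. */
+ return (vmbus_rxbr_read(rbr, buf, dlen, hlen));
+}
+#endif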
diff --git a/sys/dev/hyperv/vmbus/vmbus_brvar.h b/sys/dev/hyperv/vmbus/vmbus_brvar.h
new file mode 100644
index 000000000000..da0ca9d8f535
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/vmbus_brvar.h
@@ -0,0 +1,104 @@
+/*-
+ * Copyright (c) 2009-2012,2016 Microsoft Corp.
+ * Copyright (c) 2012 NetApp Inc.
+ * Copyright (c) 2012 Citrix Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+#ifndef _VMBUS_BRVAR_H_
+#define _VMBUS_BRVAR_H_
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/mutex.h>
+#include <sys/_iovec.h>
+
+struct vmbus_br {
+ struct vmbus_bufring *vbr;
+ uint32_t vbr_dsize; /* total data size */
+};
+
+#define vbr_windex vbr->br_windex
+#define vbr_rindex vbr->br_rindex
+#define vbr_imask vbr->br_imask
+#define vbr_data vbr->br_data
+
+struct vmbus_rxbr {
+ struct mtx rxbr_lock;
+ struct vmbus_br rxbr;
+};
+
+#define rxbr_windex rxbr.vbr_windex
+#define rxbr_rindex rxbr.vbr_rindex
+#define rxbr_imask rxbr.vbr_imask
+#define rxbr_data rxbr.vbr_data
+#define rxbr_dsize rxbr.vbr_dsize
+
+struct vmbus_txbr {
+ struct mtx txbr_lock;
+ struct vmbus_br txbr;
+};
+
+#define txbr_windex txbr.vbr_windex
+#define txbr_rindex txbr.vbr_rindex
+#define txbr_imask txbr.vbr_imask
+#define txbr_data txbr.vbr_data
+#define txbr_dsize txbr.vbr_dsize
+
+struct sysctl_ctx_list;
+struct sysctl_oid;
+
+static __inline int
+vmbus_txbr_maxpktsz(const struct vmbus_txbr *tbr)
+{
+ /*
+ * - 64 bits for the trailing start index (- sizeof(uint64_t)).
+ * - The rindex and windex can't be the same (- 1). See
+ * the comment near vmbus_bufring.br_{r,w}index.
+ */
+ return (tbr->txbr_dsize - sizeof(uint64_t) - 1);
+}
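+
+/*
+ * Worked example: with a 4096-byte data area, the largest single
+ * packet is 4096 - 8 (trailing start index) - 1 (windex may never
+ * catch up to rindex) == 4087 bytes.
+ */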
+
+void vmbus_br_sysctl_create(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid *br_tree, struct vmbus_br *br,
+ const char *name);
+
+void vmbus_rxbr_init(struct vmbus_rxbr *rbr);
+void vmbus_rxbr_deinit(struct vmbus_rxbr *rbr);
+void vmbus_rxbr_setup(struct vmbus_rxbr *rbr, void *buf, int blen);
+int vmbus_rxbr_peek(struct vmbus_rxbr *rbr, void *data, int dlen);
+int vmbus_rxbr_read(struct vmbus_rxbr *rbr, void *data, int dlen,
+ uint32_t skip);
+void vmbus_rxbr_intr_mask(struct vmbus_rxbr *rbr);
+uint32_t vmbus_rxbr_intr_unmask(struct vmbus_rxbr *rbr);
+
+void vmbus_txbr_init(struct vmbus_txbr *tbr);
+void vmbus_txbr_deinit(struct vmbus_txbr *tbr);
+void vmbus_txbr_setup(struct vmbus_txbr *tbr, void *buf, int blen);
+int vmbus_txbr_write(struct vmbus_txbr *tbr,
+ const struct iovec iov[], int iovlen, boolean_t *need_sig);
+
+#endif /* _VMBUS_BRVAR_H_ */
diff --git a/sys/dev/hyperv/vmbus/vmbus_chan.c b/sys/dev/hyperv/vmbus/vmbus_chan.c
index b7a4b2ccd71f..bb88c0c7be4f 100644
--- a/sys/dev/hyperv/vmbus/vmbus_chan.c
+++ b/sys/dev/hyperv/vmbus/vmbus_chan.c
@@ -31,37 +31,44 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
-#include <sys/malloc.h>
-#include <sys/systm.h>
-#include <sys/mbuf.h>
#include <sys/lock.h>
+#include <sys/malloc.h>
#include <sys/mutex.h>
+#include <sys/smp.h>
#include <sys/sysctl.h>
+#include <sys/systm.h>
#include <machine/atomic.h>
-#include <machine/bus.h>
-
-#include <vm/vm.h>
-#include <vm/vm_param.h>
-#include <vm/pmap.h>
#include <dev/hyperv/include/hyperv_busdma.h>
-#include <dev/hyperv/vmbus/hv_vmbus_priv.h>
#include <dev/hyperv/vmbus/hyperv_var.h>
#include <dev/hyperv/vmbus/vmbus_reg.h>
#include <dev/hyperv/vmbus/vmbus_var.h>
-
-static void vmbus_chan_update_evtflagcnt(struct vmbus_softc *,
- const struct vmbus_channel *);
-
-static void vmbus_chan_task(void *, int);
-static void vmbus_chan_task_nobatch(void *, int);
-static void vmbus_chan_detach_task(void *, int);
-
-static void vmbus_chan_msgproc_choffer(struct vmbus_softc *,
- const struct vmbus_message *);
-static void vmbus_chan_msgproc_chrescind(struct vmbus_softc *,
- const struct vmbus_message *);
+#include <dev/hyperv/vmbus/vmbus_brvar.h>
+#include <dev/hyperv/vmbus/vmbus_chanvar.h>
+
+static void vmbus_chan_update_evtflagcnt(
+ struct vmbus_softc *,
+ const struct vmbus_channel *);
+static void vmbus_chan_close_internal(
+ struct vmbus_channel *);
+static int vmbus_chan_sysctl_mnf(SYSCTL_HANDLER_ARGS);
+static void vmbus_chan_sysctl_create(
+ struct vmbus_channel *);
+static struct vmbus_channel *vmbus_chan_alloc(struct vmbus_softc *);
+static void vmbus_chan_free(struct vmbus_channel *);
+static int vmbus_chan_add(struct vmbus_channel *);
+static void vmbus_chan_cpu_default(struct vmbus_channel *);
+
+static void vmbus_chan_task(void *, int);
+static void vmbus_chan_task_nobatch(void *, int);
+static void vmbus_chan_detach_task(void *, int);
+
+static void vmbus_chan_msgproc_choffer(struct vmbus_softc *,
+ const struct vmbus_message *);
+static void vmbus_chan_msgproc_chrescind(
+ struct vmbus_softc *,
+ const struct vmbus_message *);
/*
* Vmbus channel message processing.
@@ -177,11 +184,11 @@ vmbus_chan_sysctl_create(struct vmbus_channel *chan)
/*
* Create sysctl tree for RX bufring.
*/
- vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_rxbr, "rx");
+ vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_rxbr.rxbr, "rx");
/*
* Create sysctl tree for TX bufring.
*/
- vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_txbr, "tx");
+ vmbus_br_sysctl_create(ctx, br_tree, &chan->ch_txbr.txbr, "tx");
}
}
@@ -238,9 +245,9 @@ vmbus_chan_open(struct vmbus_channel *chan, int txbr_size, int rxbr_size,
chan->ch_bufring = br;
/* TX bufring comes first */
- hv_vmbus_ring_buffer_init(&chan->ch_txbr, br, txbr_size);
+ vmbus_txbr_setup(&chan->ch_txbr, br, txbr_size);
/* RX bufring immediately follows TX bufring */
- hv_vmbus_ring_buffer_init(&chan->ch_rxbr, br + txbr_size, rxbr_size);
+ vmbus_rxbr_setup(&chan->ch_rxbr, br + txbr_size, rxbr_size);
/* Create sysctl tree for this channel */
vmbus_chan_sysctl_create(chan);
@@ -548,8 +555,6 @@ vmbus_chan_close_internal(struct vmbus_channel *chan)
/*
* Destroy the TX+RX bufrings.
*/
- hv_ring_buffer_cleanup(&chan->ch_txbr);
- hv_ring_buffer_cleanup(&chan->ch_rxbr);
if (chan->ch_bufring != NULL) {
hyperv_dmamem_free(&chan->ch_bufring_dma, chan->ch_bufring);
chan->ch_bufring = NULL;
@@ -605,6 +610,8 @@ vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, uint16_t flags,
hlen = sizeof(pkt);
pktlen = hlen + dlen;
pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+ KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
+ ("invalid packet size %d", pad_pktlen));
pkt.cp_hdr.cph_type = type;
pkt.cp_hdr.cph_flags = flags;
@@ -619,7 +626,7 @@ vmbus_chan_send(struct vmbus_channel *chan, uint16_t type, uint16_t flags,
iov[2].iov_base = &pad;
iov[2].iov_len = pad_pktlen - pktlen;
- error = hv_ring_buffer_write(&chan->ch_txbr, iov, 3, &send_evt);
+ error = vmbus_txbr_write(&chan->ch_txbr, iov, 3, &send_evt);
if (!error && send_evt)
vmbus_chan_signal_tx(chan);
return error;
@@ -635,12 +642,11 @@ vmbus_chan_send_sglist(struct vmbus_channel *chan,
boolean_t send_evt;
uint64_t pad = 0;
- KASSERT(sglen < VMBUS_CHAN_SGLIST_MAX,
- ("invalid sglist len %d", sglen));
-
hlen = __offsetof(struct vmbus_chanpkt_sglist, cp_gpa[sglen]);
pktlen = hlen + dlen;
pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+ KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
+ ("invalid packet size %d", pad_pktlen));
pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
@@ -659,7 +665,7 @@ vmbus_chan_send_sglist(struct vmbus_channel *chan,
iov[3].iov_base = &pad;
iov[3].iov_len = pad_pktlen - pktlen;
- error = hv_ring_buffer_write(&chan->ch_txbr, iov, 4, &send_evt);
+ error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
if (!error && send_evt)
vmbus_chan_signal_tx(chan);
return error;
@@ -676,13 +682,12 @@ vmbus_chan_send_prplist(struct vmbus_channel *chan,
boolean_t send_evt;
uint64_t pad = 0;
- KASSERT(prp_cnt < VMBUS_CHAN_PRPLIST_MAX,
- ("invalid prplist entry count %d", prp_cnt));
-
hlen = __offsetof(struct vmbus_chanpkt_prplist,
cp_range[0].gpa_page[prp_cnt]);
pktlen = hlen + dlen;
pad_pktlen = VMBUS_CHANPKT_TOTLEN(pktlen);
+ KASSERT(pad_pktlen <= vmbus_txbr_maxpktsz(&chan->ch_txbr),
+ ("invalid packet size %d", pad_pktlen));
pkt.cp_hdr.cph_type = VMBUS_CHANPKT_TYPE_GPA;
pkt.cp_hdr.cph_flags = VMBUS_CHANPKT_FLAG_RC;
@@ -701,7 +706,7 @@ vmbus_chan_send_prplist(struct vmbus_channel *chan,
iov[3].iov_base = &pad;
iov[3].iov_len = pad_pktlen - pktlen;
- error = hv_ring_buffer_write(&chan->ch_txbr, iov, 4, &send_evt);
+ error = vmbus_txbr_write(&chan->ch_txbr, iov, 4, &send_evt);
if (!error && send_evt)
vmbus_chan_signal_tx(chan);
return error;
@@ -714,7 +719,7 @@ vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen0,
struct vmbus_chanpkt_hdr pkt;
int error, dlen, hlen;
- error = hv_ring_buffer_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
+ error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
if (error)
return error;
@@ -731,8 +736,8 @@ vmbus_chan_recv(struct vmbus_channel *chan, void *data, int *dlen0,
*dlen0 = dlen;
/* Skip packet header */
- error = hv_ring_buffer_read(&chan->ch_rxbr, data, dlen, hlen);
- KASSERT(!error, ("hv_ring_buffer_read failed"));
+ error = vmbus_rxbr_read(&chan->ch_rxbr, data, dlen, hlen);
+ KASSERT(!error, ("vmbus_rxbr_read failed"));
return 0;
}
@@ -744,7 +749,7 @@ vmbus_chan_recv_pkt(struct vmbus_channel *chan,
struct vmbus_chanpkt_hdr pkt;
int error, pktlen;
- error = hv_ring_buffer_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
+ error = vmbus_rxbr_peek(&chan->ch_rxbr, &pkt, sizeof(pkt));
if (error)
return error;
@@ -757,8 +762,8 @@ vmbus_chan_recv_pkt(struct vmbus_channel *chan,
*pktlen0 = pktlen;
/* Include packet header */
- error = hv_ring_buffer_read(&chan->ch_rxbr, pkt0, pktlen, 0);
- KASSERT(!error, ("hv_ring_buffer_read failed"));
+ error = vmbus_rxbr_read(&chan->ch_rxbr, pkt0, pktlen, 0);
+ KASSERT(!error, ("vmbus_rxbr_read failed"));
return 0;
}
@@ -787,12 +792,12 @@ vmbus_chan_task(void *xchan, int pending __unused)
cb(chan, cbarg);
- left = hv_ring_buffer_read_end(&chan->ch_rxbr);
+ left = vmbus_rxbr_intr_unmask(&chan->ch_rxbr);
if (left == 0) {
/* No more data in RX bufring; done */
break;
}
- hv_ring_buffer_read_begin(&chan->ch_rxbr);
+ vmbus_rxbr_intr_mask(&chan->ch_rxbr);
}
}
@@ -834,7 +839,7 @@ vmbus_event_flags_proc(struct vmbus_softc *sc, volatile u_long *event_flags,
continue;
if (chan->ch_flags & VMBUS_CHAN_FLAG_BATCHREAD)
- hv_ring_buffer_read_begin(&chan->ch_rxbr);
+ vmbus_rxbr_intr_mask(&chan->ch_rxbr);
taskqueue_enqueue(chan->ch_tq, &chan->ch_task);
}
}
@@ -913,6 +918,8 @@ vmbus_chan_alloc(struct vmbus_softc *sc)
mtx_init(&chan->ch_subchan_lock, "vmbus subchan", NULL, MTX_DEF);
TAILQ_INIT(&chan->ch_subchans);
TASK_INIT(&chan->ch_detach_task, 0, vmbus_chan_detach_task, chan);
+ vmbus_rxbr_init(&chan->ch_rxbr);
+ vmbus_txbr_init(&chan->ch_txbr);
return chan;
}
@@ -925,6 +932,8 @@ vmbus_chan_free(struct vmbus_channel *chan)
 /* TODO: assert that the channel is no longer on the vmbus channel list */
hyperv_dmamem_free(&chan->ch_monprm_dma, chan->ch_monprm);
mtx_destroy(&chan->ch_subchan_lock);
+ vmbus_rxbr_deinit(&chan->ch_rxbr);
+ vmbus_txbr_deinit(&chan->ch_txbr);
free(chan, M_DEVBUF);
}
diff --git a/sys/dev/hyperv/vmbus/vmbus_chanvar.h b/sys/dev/hyperv/vmbus/vmbus_chanvar.h
index 593d09d2974b..68a134d3f2fb 100644
--- a/sys/dev/hyperv/vmbus/vmbus_chanvar.h
+++ b/sys/dev/hyperv/vmbus/vmbus_chanvar.h
@@ -39,12 +39,7 @@
#include <dev/hyperv/include/hyperv.h>
#include <dev/hyperv/include/hyperv_busdma.h>
#include <dev/hyperv/include/vmbus.h>
-
-typedef struct {
- struct vmbus_bufring *ring_buffer;
- struct mtx ring_lock;
- uint32_t ring_data_size; /* ring_size */
-} hv_vmbus_ring_buffer_info;
+#include <dev/hyperv/vmbus/vmbus_brvar.h>
struct vmbus_channel {
/*
@@ -57,7 +52,7 @@ struct vmbus_channel {
/*
* RX bufring; immediately following ch_txbr.
*/
- hv_vmbus_ring_buffer_info ch_rxbr;
+ struct vmbus_rxbr ch_rxbr;
struct taskqueue *ch_tq;
struct task ch_task;
@@ -76,7 +71,7 @@ struct vmbus_channel {
* TX bufring and following MNF/evtflags do _not_ fit in
* one 64B cacheline.
*/
- hv_vmbus_ring_buffer_info ch_txbr __aligned(CACHE_LINE_SIZE);
+ struct vmbus_txbr ch_txbr __aligned(CACHE_LINE_SIZE);
uint32_t ch_txflags; /* VMBUS_CHAN_TXF_ */
/*
@@ -160,9 +155,10 @@ struct vmbus_channel {
struct vmbus_softc;
struct vmbus_message;
-void vmbus_event_proc(struct vmbus_softc *, int);
-void vmbus_event_proc_compat(struct vmbus_softc *, int);
-void vmbus_chan_msgproc(struct vmbus_softc *, const struct vmbus_message *);
-void vmbus_chan_destroy_all(struct vmbus_softc *);
+void vmbus_event_proc(struct vmbus_softc *, int);
+void vmbus_event_proc_compat(struct vmbus_softc *, int);
+void vmbus_chan_msgproc(struct vmbus_softc *,
+ const struct vmbus_message *);
+void vmbus_chan_destroy_all(struct vmbus_softc *);
#endif /* !_VMBUS_CHANVAR_H_ */
diff --git a/sys/dev/hyperv/vmbus/vmbus_et.c b/sys/dev/hyperv/vmbus/vmbus_et.c
index 6e3fbfda402d..9e217ece8a22 100644
--- a/sys/dev/hyperv/vmbus/vmbus_et.c
+++ b/sys/dev/hyperv/vmbus/vmbus_et.c
@@ -32,9 +32,8 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/proc.h>
-#include <sys/systm.h>
#include <sys/smp.h>
-#include <sys/time.h>
+#include <sys/systm.h>
#include <sys/timeet.h>
#include <dev/hyperv/vmbus/hyperv_reg.h>
@@ -57,8 +56,35 @@ __FBSDID("$FreeBSD$");
CPUID_HV_MSR_SYNIC | \
CPUID_HV_MSR_SYNTIMER)
+static void vmbus_et_identify(driver_t *, device_t);
+static int vmbus_et_probe(device_t);
+static int vmbus_et_attach(device_t);
+static int vmbus_et_detach(device_t);
+static int vmbus_et_start(struct eventtimer *, sbintime_t,
+ sbintime_t);
+
static struct eventtimer vmbus_et;
+static device_method_t vmbus_et_methods[] = {
+ DEVMETHOD(device_identify, vmbus_et_identify),
+ DEVMETHOD(device_probe, vmbus_et_probe),
+ DEVMETHOD(device_attach, vmbus_et_attach),
+ DEVMETHOD(device_detach, vmbus_et_detach),
+
+ DEVMETHOD_END
+};
+
+static driver_t vmbus_et_driver = {
+ VMBUS_ET_NAME,
+ vmbus_et_methods,
+ 0
+};
+
+static devclass_t vmbus_et_devclass;
+
+DRIVER_MODULE(hv_et, vmbus, vmbus_et_driver, vmbus_et_devclass, NULL, NULL);
+MODULE_VERSION(hv_et, 1);
+
static __inline uint64_t
hyperv_sbintime2count(sbintime_t time)
{
@@ -175,22 +201,3 @@ vmbus_et_detach(device_t dev)
{
return (et_deregister(&vmbus_et));
}
-
-static device_method_t vmbus_et_methods[] = {
- DEVMETHOD(device_identify, vmbus_et_identify),
- DEVMETHOD(device_probe, vmbus_et_probe),
- DEVMETHOD(device_attach, vmbus_et_attach),
- DEVMETHOD(device_detach, vmbus_et_detach),
-
- DEVMETHOD_END
-};
-
-static driver_t vmbus_et_driver = {
- VMBUS_ET_NAME,
- vmbus_et_methods,
- 0
-};
-
-static devclass_t vmbus_et_devclass;
-DRIVER_MODULE(hv_et, vmbus, vmbus_et_driver, vmbus_et_devclass, NULL, NULL);
-MODULE_VERSION(hv_et, 1);
diff --git a/sys/dev/hyperv/vmbus/vmbus_var.h b/sys/dev/hyperv/vmbus/vmbus_var.h
index 1316d7443665..47d9004e5719 100644
--- a/sys/dev/hyperv/vmbus/vmbus_var.h
+++ b/sys/dev/hyperv/vmbus/vmbus_var.h
@@ -30,7 +30,6 @@
#define _VMBUS_VAR_H_
#include <sys/param.h>
-#include <sys/bus_dma.h>
#include <sys/taskqueue.h>
#include <dev/hyperv/include/hyperv_busdma.h>
@@ -87,7 +86,7 @@ struct vmbus_softc {
u_long *vmbus_rx_evtflags;
/* compat evtflgs from host */
struct vmbus_channel **vmbus_chmap;
- struct vmbus_msghc_ctx *vmbus_msg_hc;
+ struct vmbus_xact_ctx *vmbus_xc;
struct vmbus_pcpu_data vmbus_pcpu[MAXCPU];
/*
@@ -121,20 +120,6 @@ struct vmbus_softc {
#define VMBUS_FLAG_ATTACHED 0x0001 /* vmbus was attached */
#define VMBUS_FLAG_SYNIC 0x0002 /* SynIC was setup */
-extern struct vmbus_softc *vmbus_sc;
-
-static __inline struct vmbus_softc *
-vmbus_get_softc(void)
-{
- return vmbus_sc;
-}
-
-static __inline device_t
-vmbus_get_device(void)
-{
- return vmbus_sc->vmbus_dev;
-}
-
#define VMBUS_PCPU_GET(sc, field, cpu) (sc)->vmbus_pcpu[(cpu)].field
#define VMBUS_PCPU_PTR(sc, field, cpu) &(sc)->vmbus_pcpu[(cpu)].field
@@ -143,20 +128,23 @@ struct trapframe;
struct vmbus_message;
struct vmbus_msghc;
-void vmbus_handle_intr(struct trapframe *);
-int vmbus_add_child(struct vmbus_channel *);
-int vmbus_delete_child(struct vmbus_channel *);
-void vmbus_et_intr(struct trapframe *);
-uint32_t vmbus_gpadl_alloc(struct vmbus_softc *);
-
-struct vmbus_msghc *vmbus_msghc_get(struct vmbus_softc *, size_t);
-void vmbus_msghc_put(struct vmbus_softc *, struct vmbus_msghc *);
-void *vmbus_msghc_dataptr(struct vmbus_msghc *);
-int vmbus_msghc_exec_noresult(struct vmbus_msghc *);
-int vmbus_msghc_exec(struct vmbus_softc *, struct vmbus_msghc *);
-const struct vmbus_message *vmbus_msghc_wait_result(struct vmbus_softc *,
- struct vmbus_msghc *);
-void vmbus_msghc_wakeup(struct vmbus_softc *, const struct vmbus_message *);
-void vmbus_msghc_reset(struct vmbus_msghc *, size_t);
+void vmbus_handle_intr(struct trapframe *);
+int vmbus_add_child(struct vmbus_channel *);
+int vmbus_delete_child(struct vmbus_channel *);
+void vmbus_et_intr(struct trapframe *);
+uint32_t vmbus_gpadl_alloc(struct vmbus_softc *);
+
+struct vmbus_msghc *
+ vmbus_msghc_get(struct vmbus_softc *, size_t);
+void vmbus_msghc_put(struct vmbus_softc *, struct vmbus_msghc *);
+void *vmbus_msghc_dataptr(struct vmbus_msghc *);
+int vmbus_msghc_exec_noresult(struct vmbus_msghc *);
+int vmbus_msghc_exec(struct vmbus_softc *, struct vmbus_msghc *);
+const struct vmbus_message *
+ vmbus_msghc_wait_result(struct vmbus_softc *,
+ struct vmbus_msghc *);
+void vmbus_msghc_wakeup(struct vmbus_softc *,
+ const struct vmbus_message *);
+void vmbus_msghc_reset(struct vmbus_msghc *, size_t);
#endif /* !_VMBUS_VAR_H_ */
diff --git a/sys/dev/hyperv/vmbus/vmbus_xact.c b/sys/dev/hyperv/vmbus/vmbus_xact.c
new file mode 100644
index 000000000000..642c165bc293
--- /dev/null
+++ b/sys/dev/hyperv/vmbus/vmbus_xact.c
@@ -0,0 +1,313 @@
+/*-
+ * Copyright (c) 2016 Microsoft Corp.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice unmodified, this list of conditions, and the following
+ * disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
+ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
+ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/lock.h>
+#include <sys/malloc.h>
+#include <sys/mutex.h>
+#include <sys/proc.h>
+#include <sys/systm.h>
+
+#include <dev/hyperv/include/hyperv_busdma.h>
+#include <dev/hyperv/include/vmbus_xact.h>
+
+struct vmbus_xact {
+ struct vmbus_xact_ctx *x_ctx;
+ void *x_priv;
+
+ void *x_req;
+ struct hyperv_dma x_req_dma;
+
+ const void *x_resp;
+ size_t x_resp_len;
+ void *x_resp0;
+};
+
+struct vmbus_xact_ctx {
+ uint32_t xc_flags;
+ size_t xc_req_size;
+ size_t xc_resp_size;
+ size_t xc_priv_size;
+
+ struct vmbus_xact *xc_free;
+ struct mtx xc_free_lock;
+
+ struct vmbus_xact *xc_active;
+ struct mtx xc_active_lock;
+};
+
+#define VMBUS_XACT_CTXF_DESTROY 0x0001
+
+static struct vmbus_xact *vmbus_xact_alloc(struct vmbus_xact_ctx *,
+ bus_dma_tag_t);
+static void vmbus_xact_free(struct vmbus_xact *);
+static struct vmbus_xact *vmbus_xact_get1(struct vmbus_xact_ctx *,
+ uint32_t);
+
+static struct vmbus_xact *
+vmbus_xact_alloc(struct vmbus_xact_ctx *ctx, bus_dma_tag_t parent_dtag)
+{
+ struct vmbus_xact *xact;
+
+ xact = malloc(sizeof(*xact), M_DEVBUF, M_WAITOK | M_ZERO);
+ xact->x_ctx = ctx;
+
+ /* XXX assume that page aligned is enough */
+ xact->x_req = hyperv_dmamem_alloc(parent_dtag, PAGE_SIZE, 0,
+ ctx->xc_req_size, &xact->x_req_dma, BUS_DMA_WAITOK);
+ if (xact->x_req == NULL) {
+ free(xact, M_DEVBUF);
+ return (NULL);
+ }
+ if (ctx->xc_priv_size != 0)
+ xact->x_priv = malloc(ctx->xc_priv_size, M_DEVBUF, M_WAITOK);
+ xact->x_resp0 = malloc(ctx->xc_resp_size, M_DEVBUF, M_WAITOK);
+
+ return (xact);
+}
+
+static void
+vmbus_xact_free(struct vmbus_xact *xact)
+{
+
+ hyperv_dmamem_free(&xact->x_req_dma, xact->x_req);
+ free(xact->x_resp0, M_DEVBUF);
+ if (xact->x_priv != NULL)
+ free(xact->x_priv, M_DEVBUF);
+ free(xact, M_DEVBUF);
+}
+
+static struct vmbus_xact *
+vmbus_xact_get1(struct vmbus_xact_ctx *ctx, uint32_t dtor_flag)
+{
+ struct vmbus_xact *xact;
+
+ mtx_lock(&ctx->xc_free_lock);
+
+ while ((ctx->xc_flags & dtor_flag) == 0 && ctx->xc_free == NULL)
+ mtx_sleep(&ctx->xc_free, &ctx->xc_free_lock, 0, "gxact", 0);
+ if (ctx->xc_flags & dtor_flag) {
+ /* Being destroyed */
+ xact = NULL;
+ } else {
+ xact = ctx->xc_free;
+ KASSERT(xact != NULL, ("no free xact"));
+ KASSERT(xact->x_resp == NULL, ("xact has pending response"));
+ ctx->xc_free = NULL;
+ }
+
+ mtx_unlock(&ctx->xc_free_lock);
+
+ return (xact);
+}
+
+struct vmbus_xact_ctx *
+vmbus_xact_ctx_create(bus_dma_tag_t dtag, size_t req_size, size_t resp_size,
+ size_t priv_size)
+{
+ struct vmbus_xact_ctx *ctx;
+
+ ctx = malloc(sizeof(*ctx), M_DEVBUF, M_WAITOK | M_ZERO);
+ ctx->xc_req_size = req_size;
+ ctx->xc_resp_size = resp_size;
+ ctx->xc_priv_size = priv_size;
+
+ ctx->xc_free = vmbus_xact_alloc(ctx, dtag);
+ if (ctx->xc_free == NULL) {
+ free(ctx, M_DEVBUF);
+ return (NULL);
+ }
+
+ mtx_init(&ctx->xc_free_lock, "vmbus xact free", NULL, MTX_DEF);
+ mtx_init(&ctx->xc_active_lock, "vmbus xact active", NULL, MTX_DEF);
+
+ return (ctx);
+}
+
+void
+vmbus_xact_ctx_destroy(struct vmbus_xact_ctx *ctx)
+{
+ struct vmbus_xact *xact;
+
+ mtx_lock(&ctx->xc_free_lock);
+ ctx->xc_flags |= VMBUS_XACT_CTXF_DESTROY;
+ mtx_unlock(&ctx->xc_free_lock);
+ wakeup(&ctx->xc_free);
+
+ xact = vmbus_xact_get1(ctx, 0);
+ if (xact == NULL)
+ panic("can't get xact");
+
+ vmbus_xact_free(xact);
+ mtx_destroy(&ctx->xc_free_lock);
+ mtx_destroy(&ctx->xc_active_lock);
+ free(ctx, M_DEVBUF);
+}
+
+struct vmbus_xact *
+vmbus_xact_get(struct vmbus_xact_ctx *ctx, size_t req_len)
+{
+ struct vmbus_xact *xact;
+
+ if (req_len > ctx->xc_req_size)
+ panic("invalid request size %zu", req_len);
+
+ xact = vmbus_xact_get1(ctx, VMBUS_XACT_CTXF_DESTROY);
+ if (xact == NULL)
+ return (NULL);
+
+ memset(xact->x_req, 0, req_len);
+ return (xact);
+}
+
+void
+vmbus_xact_put(struct vmbus_xact *xact)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+
+ KASSERT(ctx->xc_active == NULL, ("pending active xact"));
+ xact->x_resp = NULL;
+
+ mtx_lock(&ctx->xc_free_lock);
+ KASSERT(ctx->xc_free == NULL, ("has free xact"));
+ ctx->xc_free = xact;
+ mtx_unlock(&ctx->xc_free_lock);
+ wakeup(&ctx->xc_free);
+}
+
+void *
+vmbus_xact_req_data(const struct vmbus_xact *xact)
+{
+
+ return (xact->x_req);
+}
+
+bus_addr_t
+vmbus_xact_req_paddr(const struct vmbus_xact *xact)
+{
+
+ return (xact->x_req_dma.hv_paddr);
+}
+
+void *
+vmbus_xact_priv(const struct vmbus_xact *xact, size_t priv_len)
+{
+
+ if (priv_len > xact->x_ctx->xc_priv_size)
+ panic("invalid priv size %zu", priv_len);
+ return (xact->x_priv);
+}
+
+void
+vmbus_xact_activate(struct vmbus_xact *xact)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+
+ KASSERT(xact->x_resp == NULL, ("xact has pending response"));
+
+ mtx_lock(&ctx->xc_active_lock);
+ KASSERT(ctx->xc_active == NULL, ("pending active xact"));
+ ctx->xc_active = xact;
+ mtx_unlock(&ctx->xc_active_lock);
+}
+
+void
+vmbus_xact_deactivate(struct vmbus_xact *xact)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+
+ mtx_lock(&ctx->xc_active_lock);
+ KASSERT(ctx->xc_active == xact, ("xact mismatch"));
+ ctx->xc_active = NULL;
+ mtx_unlock(&ctx->xc_active_lock);
+}
+
+const void *
+vmbus_xact_wait(struct vmbus_xact *xact, size_t *resp_len)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+ const void *resp;
+
+ mtx_lock(&ctx->xc_active_lock);
+
+ KASSERT(ctx->xc_active == xact, ("xact mismatch"));
+ while (xact->x_resp == NULL) {
+ mtx_sleep(&ctx->xc_active, &ctx->xc_active_lock, 0,
+ "wxact", 0);
+ }
+ ctx->xc_active = NULL;
+
+ resp = xact->x_resp;
+ *resp_len = xact->x_resp_len;
+
+ mtx_unlock(&ctx->xc_active_lock);
+
+ return (resp);
+}
+
+static void
+vmbus_xact_save_resp(struct vmbus_xact *xact, const void *data, size_t dlen)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+ size_t cplen = dlen;
+
+ mtx_assert(&ctx->xc_active_lock, MA_OWNED);
+
+ if (cplen > ctx->xc_resp_size) {
+ printf("vmbus: xact response truncated %zu -> %zu\n",
+ cplen, ctx->xc_resp_size);
+ cplen = ctx->xc_resp_size;
+ }
+
+ KASSERT(ctx->xc_active == xact, ("xact mismatch"));
+ memcpy(xact->x_resp0, data, cplen);
+ xact->x_resp_len = cplen;
+ xact->x_resp = xact->x_resp0;
+}
+
+void
+vmbus_xact_wakeup(struct vmbus_xact *xact, const void *data, size_t dlen)
+{
+ struct vmbus_xact_ctx *ctx = xact->x_ctx;
+
+ mtx_lock(&ctx->xc_active_lock);
+ vmbus_xact_save_resp(xact, data, dlen);
+ mtx_unlock(&ctx->xc_active_lock);
+ wakeup(&ctx->xc_active);
+}
+
+void
+vmbus_xact_ctx_wakeup(struct vmbus_xact_ctx *ctx, const void *data, size_t dlen)
+{
+ mtx_lock(&ctx->xc_active_lock);
+ KASSERT(ctx->xc_active != NULL, ("no pending xact"));
+ vmbus_xact_save_resp(ctx->xc_active, data, dlen);
+ mtx_unlock(&ctx->xc_active_lock);
+ wakeup(&ctx->xc_active);
+}
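+
+#if 0
+/*
+ * Lifecycle sketch (hypothetical caller): one request/response
+ * exchange over this transaction context.  "example_submit" stands in
+ * for whatever actually sends the request to the host (typically a
+ * vmbus_chan_send() variant using vmbus_xact_req_paddr()); struct
+ * example_req is likewise illustrative.
+ */
+static void
+example_xact_exchange(struct vmbus_xact_ctx *ctx)
+{
+ struct vmbus_xact *xact;
+ const void *resp;
+ size_t resp_len;
+ struct example_req *req;
+
+ xact = vmbus_xact_get(ctx, sizeof(*req));
+ if (xact == NULL)
+ return; /* the context is being destroyed */
+ req = vmbus_xact_req_data(xact);
+ /* ... fill in *req ... */
+
+ vmbus_xact_activate(xact);
+ example_submit(vmbus_xact_req_paddr(xact), sizeof(*req));
+ resp = vmbus_xact_wait(xact, &resp_len);
+ /* ... consume resp/resp_len; storage stays valid until the put ... */
+
+ vmbus_xact_put(xact);
+}
+#endif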
diff --git a/sys/dev/ioat/ioat.c b/sys/dev/ioat/ioat.c
index 0b04e13266a2..45613498adfc 100644
--- a/sys/dev/ioat/ioat.c
+++ b/sys/dev/ioat/ioat.c
@@ -663,7 +663,7 @@ ioat_process_events(struct ioat_softc *ioat)
boolean_t pending;
int error;
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
mtx_lock(&ioat->cleanup_lock);
@@ -693,8 +693,9 @@ ioat_process_events(struct ioat_softc *ioat)
while (1) {
desc = ioat_get_ring_entry(ioat, ioat->tail);
dmadesc = &desc->bus_dmadesc;
- CTR3(KTR_IOAT, "completing desc %u ok cb %p(%p)", ioat->tail,
- dmadesc->callback_fn, dmadesc->callback_arg);
+ CTR4(KTR_IOAT, "channel=%u completing desc %u ok cb %p(%p)",
+ ioat->chan_idx, ioat->tail, dmadesc->callback_fn,
+ dmadesc->callback_arg);
if (dmadesc->callback_fn != NULL)
dmadesc->callback_fn(dmadesc->callback_arg, 0);
@@ -764,8 +765,9 @@ out:
while (ioat_get_active(ioat) > 0) {
desc = ioat_get_ring_entry(ioat, ioat->tail);
dmadesc = &desc->bus_dmadesc;
- CTR3(KTR_IOAT, "completing desc %u err cb %p(%p)", ioat->tail,
- dmadesc->callback_fn, dmadesc->callback_arg);
+ CTR4(KTR_IOAT, "channel=%u completing desc %u err cb %p(%p)",
+ ioat->chan_idx, ioat->tail, dmadesc->callback_fn,
+ dmadesc->callback_arg);
if (dmadesc->callback_fn != NULL)
dmadesc->callback_fn(dmadesc->callback_arg,
@@ -919,7 +921,7 @@ ioat_acquire(bus_dmaengine_t dmaengine)
ioat = to_ioat_softc(dmaengine);
mtx_lock(&ioat->submit_lock);
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
}
int
@@ -943,7 +945,7 @@ ioat_release(bus_dmaengine_t dmaengine)
struct ioat_softc *ioat;
ioat = to_ioat_softc(dmaengine);
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
mtx_unlock(&ioat->submit_lock);
}
@@ -1005,8 +1007,8 @@ ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
struct ioat_descriptor *desc;
struct ioat_softc *ioat;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
callback_arg, flags);
@@ -1028,8 +1030,8 @@ ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
struct ioat_descriptor *desc;
struct ioat_softc *ioat;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
if (((src | dst) & (0xffffull << 48)) != 0) {
ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
@@ -1059,8 +1061,8 @@ ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
struct ioat_descriptor *desc;
struct ioat_softc *ioat;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
@@ -1106,8 +1108,8 @@ ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
uint32_t teststore;
uint8_t op;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
@@ -1185,8 +1187,8 @@ ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
uint32_t teststore;
uint8_t op;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
ioat_log_message(0, "%s: Device lacks CRC capability\n",
@@ -1262,8 +1264,8 @@ ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
struct ioat_descriptor *desc;
struct ioat_softc *ioat;
- CTR0(KTR_IOAT, __func__);
ioat = to_ioat_softc(dmaengine);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
ioat_log_message(0, "%s: Device lacks BFILL capability\n",
@@ -1528,7 +1530,7 @@ ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
uint32_t oldsize, newsize, head, tail, i, end;
int error;
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
mtx_assert(&ioat->submit_lock, MA_OWNED);
@@ -1626,7 +1628,7 @@ ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
uint32_t oldsize, newsize, current_idx, new_idx, i;
int error;
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
mtx_assert(&ioat->submit_lock, MA_OWNED);
@@ -1802,7 +1804,7 @@ ioat_reset_hw(struct ioat_softc *ioat)
unsigned timeout;
int error;
- CTR0(KTR_IOAT, __func__);
+ CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
mtx_lock(IOAT_REFLK);
while (ioat->resetting && !ioat->destroying)
diff --git a/sys/dev/iwm/if_iwm.c b/sys/dev/iwm/if_iwm.c
index 87db721ddd95..8d39c4004490 100644
--- a/sys/dev/iwm/if_iwm.c
+++ b/sys/dev/iwm/if_iwm.c
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iwm.c,v 1.39 2015/03/23 00:35:19 jsg Exp $ */
+/* $OpenBSD: if_iwm.c,v 1.42 2015/05/30 02:49:23 deraadt Exp $ */
/*
* Copyright (c) 2014 genua mbh <info@genua.de>
@@ -173,11 +173,23 @@ const uint8_t iwm_nvm_channels[] = {
100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
149, 153, 157, 161, 165
};
-#define IWM_NUM_2GHZ_CHANNELS 14
-
_Static_assert(nitems(iwm_nvm_channels) <= IWM_NUM_CHANNELS,
"IWM_NUM_CHANNELS is too small");
+const uint8_t iwm_nvm_channels_8000[] = {
+ /* 2.4 GHz */
+ 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
+ /* 5 GHz */
+ 36, 40, 44, 48, 52, 56, 60, 64, 68, 72, 76, 80, 84, 88, 92,
+ 96, 100, 104, 108, 112, 116, 120, 124, 128, 132, 136, 140, 144,
+ 149, 153, 157, 161, 165, 169, 173, 177, 181
+};
+_Static_assert(nitems(iwm_nvm_channels_8000) <= IWM_NUM_CHANNELS_8000,
+ "IWM_NUM_CHANNELS_8000 is too small");
+
+#define IWM_NUM_2GHZ_CHANNELS 14
+#define IWM_N_HW_ADDR_MASK 0xF
+
/*
* XXX For now, there's simply a fixed set of rate table entries
* that are populated.
@@ -205,6 +217,11 @@ const struct iwm_rate {
#define IWM_RIDX_IS_CCK(_i_) ((_i_) < IWM_RIDX_OFDM)
#define IWM_RIDX_IS_OFDM(_i_) ((_i_) >= IWM_RIDX_OFDM)
+struct iwm_nvm_section {
+ uint16_t length;
+ uint8_t *data;
+};
+
static int iwm_store_cscheme(struct iwm_softc *, const uint8_t *, size_t);
static int iwm_firmware_store_section(struct iwm_softc *,
enum iwm_ucode_type,
@@ -242,27 +259,45 @@ static void iwm_mvm_nic_config(struct iwm_softc *);
static int iwm_nic_rx_init(struct iwm_softc *);
static int iwm_nic_tx_init(struct iwm_softc *);
static int iwm_nic_init(struct iwm_softc *);
-static void iwm_enable_txq(struct iwm_softc *, int, int);
+static int iwm_enable_txq(struct iwm_softc *, int, int, int);
static int iwm_post_alive(struct iwm_softc *);
static int iwm_nvm_read_chunk(struct iwm_softc *, uint16_t, uint16_t,
uint16_t, uint8_t *, uint16_t *);
static int iwm_nvm_read_section(struct iwm_softc *, uint16_t, uint8_t *,
- uint16_t *);
+ uint16_t *, size_t);
static uint32_t iwm_eeprom_channel_flags(uint16_t);
static void iwm_add_channel_band(struct iwm_softc *,
- struct ieee80211_channel[], int, int *, int, int,
+ struct ieee80211_channel[], int, int *, int, size_t,
const uint8_t[]);
static void iwm_init_channel_map(struct ieee80211com *, int, int *,
struct ieee80211_channel[]);
static int iwm_parse_nvm_data(struct iwm_softc *, const uint16_t *,
- const uint16_t *, const uint16_t *, uint8_t,
- uint8_t);
-struct iwm_nvm_section;
+ const uint16_t *, const uint16_t *,
+ const uint16_t *, const uint16_t *,
+ const uint16_t *);
+static void iwm_set_hw_address_8000(struct iwm_softc *,
+ struct iwm_nvm_data *,
+ const uint16_t *, const uint16_t *);
+static int iwm_get_sku(const struct iwm_softc *, const uint16_t *,
+ const uint16_t *);
+static int iwm_get_nvm_version(const struct iwm_softc *, const uint16_t *);
+static int iwm_get_radio_cfg(const struct iwm_softc *, const uint16_t *,
+ const uint16_t *);
+static int iwm_get_n_hw_addrs(const struct iwm_softc *,
+ const uint16_t *);
+static void iwm_set_radio_cfg(const struct iwm_softc *,
+ struct iwm_nvm_data *, uint32_t);
static int iwm_parse_nvm_sections(struct iwm_softc *,
struct iwm_nvm_section *);
static int iwm_nvm_init(struct iwm_softc *);
+static int iwm_firmware_load_sect(struct iwm_softc *, uint32_t,
+ const uint8_t *, uint32_t);
static int iwm_firmware_load_chunk(struct iwm_softc *, uint32_t,
const uint8_t *, uint32_t);
+static int iwm_load_firmware_7000(struct iwm_softc *, enum iwm_ucode_type);
+static int iwm_load_cpu_sections_8000(struct iwm_softc *,
+ struct iwm_fw_sects *, int , int *);
+static int iwm_load_firmware_8000(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_load_firmware(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_start_fw(struct iwm_softc *, enum iwm_ucode_type);
static int iwm_send_tx_ant_cfg(struct iwm_softc *, uint8_t);
@@ -297,10 +332,8 @@ static int iwm_tx(struct iwm_softc *, struct mbuf *,
struct ieee80211_node *, int);
static int iwm_raw_xmit(struct ieee80211_node *, struct mbuf *,
const struct ieee80211_bpf_params *);
-static void iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *,
- struct iwm_mvm_add_sta_cmd_v5 *);
static int iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *,
- struct iwm_mvm_add_sta_cmd_v6 *,
+ struct iwm_mvm_add_sta_cmd_v7 *,
int *);
static int iwm_mvm_sta_send_to_fw(struct iwm_softc *, struct iwm_node *,
int);
@@ -321,6 +354,13 @@ static void iwm_setrates(struct iwm_softc *, struct iwm_node *);
static int iwm_media_change(struct ifnet *);
static int iwm_newstate(struct ieee80211vap *, enum ieee80211_state, int);
static void iwm_endscan_cb(void *, int);
+static void iwm_mvm_fill_sf_command(struct iwm_softc *,
+ struct iwm_sf_cfg_cmd *,
+ struct ieee80211_node *);
+static int iwm_mvm_sf_config(struct iwm_softc *, enum iwm_sf_state);
+static int iwm_send_bt_init_conf(struct iwm_softc *);
+static int iwm_send_update_mcc_cmd(struct iwm_softc *, const char *);
+static void iwm_mvm_tt_tx_backoff(struct iwm_softc *, uint32_t);
static int iwm_init_hw(struct iwm_softc *);
static void iwm_init(struct iwm_softc *);
static void iwm_start(struct iwm_softc *);
@@ -331,10 +371,12 @@ static void iwm_parent(struct ieee80211com *);
static const char *
iwm_desc_lookup(uint32_t);
static void iwm_nic_error(struct iwm_softc *);
+static void iwm_nic_umac_error(struct iwm_softc *);
#endif
static void iwm_notif_intr(struct iwm_softc *);
static void iwm_intr(void *);
static int iwm_attach(device_t);
+static int iwm_is_valid_ether_addr(uint8_t *);
static void iwm_preinit(void *);
static int iwm_detach_local(struct iwm_softc *sc, int);
static void iwm_init_task(void *);
@@ -477,6 +519,12 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
}
fw->fw_fp = fwp;
+ /* (Re-)Initialize default values. */
+ sc->sc_capaflags = 0;
+ sc->sc_capa_n_scan_channels = IWM_MAX_NUM_SCAN_CHANNELS;
+ memset(sc->sc_enabled_capa, 0, sizeof(sc->sc_enabled_capa));
+ memset(sc->sc_fw_mcc, 0, sizeof(sc->sc_fw_mcc));
+
/*
* Parse firmware contents
*/
@@ -490,7 +538,10 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
goto out;
}
- sc->sc_fwver = le32toh(uhdr->ver);
+ snprintf(sc->sc_fwver, sizeof(sc->sc_fwver), "%d.%d (API ver %d)",
+ IWM_UCODE_MAJOR(le32toh(uhdr->ver)),
+ IWM_UCODE_MINOR(le32toh(uhdr->ver)),
+ IWM_UCODE_API(le32toh(uhdr->ver)));
data = uhdr->data;
len = fw->fw_fp->datasize - sizeof(*uhdr);
@@ -527,7 +578,8 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
sc->sc_capa_max_probe_len
= le32toh(*(const uint32_t *)tlv_data);
/* limit it to something sensible */
- if (sc->sc_capa_max_probe_len > (1<<16)) {
+ if (sc->sc_capa_max_probe_len >
+ IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE) {
IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
"%s: IWM_UCODE_TLV_PROBE_MAX_LEN "
"ridiculous\n", __func__);
@@ -578,7 +630,8 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
goto parse_out;
}
break;
- case IWM_UCODE_TLV_NUM_OF_CPU:
+ case IWM_UCODE_TLV_NUM_OF_CPU: {
+ uint32_t num_cpu;
if (tlv_len != sizeof(uint32_t)) {
device_printf(sc->sc_dev,
"%s: IWM_UCODE_TLV_NUM_OF_CPU: tlv_len (%d) < sizeof(uint32_t)\n",
@@ -587,15 +640,16 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
error = EINVAL;
goto parse_out;
}
- if (le32toh(*(const uint32_t*)tlv_data) != 1) {
+ num_cpu = le32toh(*(const uint32_t *)tlv_data);
+ if (num_cpu < 1 || num_cpu > 2) {
device_printf(sc->sc_dev,
- "%s: driver supports "
- "only TLV_NUM_OF_CPU == 1",
+ "%s: Driver supports only 1 or 2 CPUs\n",
__func__);
error = EINVAL;
goto parse_out;
}
break;
+ }
case IWM_UCODE_TLV_SEC_RT:
if ((error = iwm_firmware_store_section(sc,
IWM_UCODE_TYPE_REGULAR, tlv_data, tlv_len)) != 0) {
@@ -657,11 +711,80 @@ iwm_read_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
le32toh(*(const uint32_t *)tlv_data);
break;
- case IWM_UCODE_TLV_API_CHANGES_SET:
- case IWM_UCODE_TLV_ENABLED_CAPABILITIES:
+ case IWM_UCODE_TLV_API_CHANGES_SET: {
+ const struct iwm_ucode_api *api;
+ if (tlv_len != sizeof(*api)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ api = (const struct iwm_ucode_api *)tlv_data;
+ /* Flags may exceed 32 bits in future firmware. */
+ if (le32toh(api->api_index) > 0) {
+ device_printf(sc->sc_dev,
+ "unsupported API index %d\n",
+ le32toh(api->api_index));
+ goto parse_out;
+ }
+ sc->sc_ucode_api = le32toh(api->api_flags);
+ break;
+ }
+
+ case IWM_UCODE_TLV_ENABLED_CAPABILITIES: {
+ const struct iwm_ucode_capa *capa;
+ int idx, i;
+ if (tlv_len != sizeof(*capa)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ capa = (const struct iwm_ucode_capa *)tlv_data;
+ idx = le32toh(capa->api_index);
+ if (idx > howmany(IWM_NUM_UCODE_TLV_CAPA, 32)) {
+ device_printf(sc->sc_dev,
+ "unsupported API index %d\n", idx);
+ goto parse_out;
+ }
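+ /*
+ * Each TLV carries one 32-bit word of the capability
+ * bitmap; e.g. api_index == 1 with bit 5 set enables
+ * capability 32 * 1 + 5 == 37.
+ */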
+ for (i = 0; i < 32; i++) {
+ if ((le32toh(capa->api_capa) & (1U << i)) == 0)
+ continue;
+ setbit(sc->sc_enabled_capa, i + (32 * idx));
+ }
+ break;
+ }
+
+ case 48: /* undocumented TLV */
+ case IWM_UCODE_TLV_SDIO_ADMA_ADDR:
+ case IWM_UCODE_TLV_FW_GSCAN_CAPA:
/* ignore, not used by current driver */
break;
+ case IWM_UCODE_TLV_SEC_RT_USNIFFER:
+ if ((error = iwm_firmware_store_section(sc,
+ IWM_UCODE_TYPE_REGULAR_USNIFFER, tlv_data,
+ tlv_len)) != 0)
+ goto parse_out;
+ break;
+
+ case IWM_UCODE_TLV_N_SCAN_CHANNELS:
+ if (tlv_len != sizeof(uint32_t)) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ sc->sc_capa_n_scan_channels =
+ le32toh(*(const uint32_t *)tlv_data);
+ break;
+
+ case IWM_UCODE_TLV_FW_VERSION:
+ if (tlv_len != sizeof(uint32_t) * 3) {
+ error = EINVAL;
+ goto parse_out;
+ }
+ snprintf(sc->sc_fwver, sizeof(sc->sc_fwver),
+ "%d.%d.%d",
+ le32toh(((const uint32_t *)tlv_data)[0]),
+ le32toh(((const uint32_t *)tlv_data)[1]),
+ le32toh(((const uint32_t *)tlv_data)[2]));
+ break;
+
default:
device_printf(sc->sc_dev,
"%s: unknown firmware section %d, abort\n",
@@ -710,7 +833,7 @@ iwm_dma_map_addr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
if (error != 0)
return;
KASSERT(nsegs == 1, ("too many DMA segments, %d should be 1", nsegs));
- *(bus_addr_t *)arg = segs[0].ds_addr;
+ *(bus_addr_t *)arg = segs[0].ds_addr;
}
static int
@@ -720,6 +843,7 @@ iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
int error;
dma->tag = NULL;
+ dma->map = NULL;
dma->size = size;
dma->vaddr = NULL;
@@ -739,14 +863,16 @@ iwm_dma_contig_alloc(bus_dma_tag_t tag, struct iwm_dma_info *dma,
if (error != 0) {
bus_dmamem_free(dma->tag, dma->vaddr, dma->map);
dma->vaddr = NULL;
- goto fail;
+ goto fail;
}
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
return 0;
-fail: iwm_dma_contig_free(dma);
+fail:
+ iwm_dma_contig_free(dma);
+
return error;
}
@@ -764,7 +890,6 @@ iwm_dma_contig_free(struct iwm_dma_info *dma)
bus_dma_tag_destroy(dma->tag);
dma->tag = NULL;
}
-
}
/* fwmem is used to load firmware onto the card */
@@ -786,12 +911,9 @@ iwm_free_fwmem(struct iwm_softc *sc)
static int
iwm_alloc_sched(struct iwm_softc *sc)
{
- int rv;
-
/* TX scheduler rings must be aligned on a 1KB boundary. */
- rv = iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
+ return iwm_dma_contig_alloc(sc->sc_dmat, &sc->sched_dma,
nitems(sc->txq) * sizeof(struct iwm_agn_scd_bc_tbl), 1024);
- return rv;
}
static void
@@ -901,7 +1023,7 @@ fail: iwm_free_rx_ring(sc, ring);
static void
iwm_disable_rx_dma(struct iwm_softc *sc)
{
-
+ /* XXX conditional nic locks are stupid */
/* XXX print out if we can't lock the NIC? */
if (iwm_nic_lock(sc)) {
/* XXX handle if RX stop doesn't finish? */
@@ -915,6 +1037,11 @@ iwm_reset_rx_ring(struct iwm_softc *sc, struct iwm_rx_ring *ring)
{
/* Reset the ring state */
ring->cur = 0;
+
+ /*
+ * The hw rx ring index in shared memory must also be cleared,
+ * otherwise the discrepancy can cause reprocessing chaos.
+ */
memset(sc->rxq.stat, 0, sizeof(*sc->rxq.stat));
}
@@ -1125,6 +1252,7 @@ iwm_ict_reset(struct iwm_softc *sc)
/* Set physical address of ICT table (4KB aligned). */
IWM_WRITE(sc, IWM_CSR_DRAM_INT_TBL_REG,
IWM_CSR_DRAM_INT_TBL_ENABLE
+ | IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER
| IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK
| sc->ict_dma.paddr >> IWM_ICT_PADDR_SHIFT);
@@ -1150,8 +1278,8 @@ iwm_stop_device(struct iwm_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- int chnl, ntries;
- int qid;
+ int chnl, qid;
+ uint32_t mask = 0;
/* tell the device to stop sending interrupts */
iwm_disable_interrupts(sc);
@@ -1173,20 +1301,20 @@ iwm_stop_device(struct iwm_softc *sc)
iwm_write_prph(sc, IWM_SCD_TXFACT, 0);
- /* Stop all DMA channels. */
if (iwm_nic_lock(sc)) {
+ /* Stop each Tx DMA channel */
for (chnl = 0; chnl < IWM_FH_TCSR_CHNL_NUM; chnl++) {
IWM_WRITE(sc,
IWM_FH_TCSR_CHNL_TX_CONFIG_REG(chnl), 0);
- for (ntries = 0; ntries < 200; ntries++) {
- uint32_t r;
-
- r = IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG);
- if (r & IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(
- chnl))
- break;
- DELAY(20);
- }
+ mask |= IWM_FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(chnl);
+ }
+
+ /* Wait for DMA channels to be idle */
+ if (!iwm_poll_bit(sc, IWM_FH_TSSR_TX_STATUS_REG, mask, mask,
+ 5000)) {
+ device_printf(sc->sc_dev,
+ "Failing on timeout while stopping DMA channel: [0x%08x]\n",
+ IWM_READ(sc, IWM_FH_TSSR_TX_STATUS_REG));
}
iwm_nic_unlock(sc);
}
@@ -1217,7 +1345,7 @@ iwm_stop_device(struct iwm_softc *sc)
*/
iwm_disable_interrupts(sc);
/* stop and reset the on-board processor */
- IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
+ IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
/*
* Even if we stop the HW, we still want the RF kill
@@ -1263,9 +1391,11 @@ iwm_mvm_nic_config(struct iwm_softc *sc)
* (PCIe power is lost before PERST# is asserted), causing ME FW
* to lose ownership and not being able to obtain it back.
*/
- iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
- IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
- ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
+ iwm_set_bits_mask_prph(sc, IWM_APMG_PS_CTRL_REG,
+ IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS,
+ ~IWM_APMG_PS_CTRL_EARLY_PWR_OFF_RESET_DIS);
+ }
}
static int
@@ -1295,16 +1425,12 @@ iwm_nic_rx_init(struct iwm_softc *sc)
IWM_FH_RSCSR_CHNL0_STTS_WPTR_REG, sc->rxq.stat_dma.paddr >> 4);
/* Enable RX. */
- /*
- * Note: Linux driver also sets this:
- * (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
- *
- * It causes weird behavior. YMMV.
- */
IWM_WRITE(sc, IWM_FH_MEM_RCSR_CHNL0_CONFIG_REG,
IWM_FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
IWM_FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY | /* HW bug */
IWM_FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+ IWM_FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
+ (IWM_RX_RB_TIMEOUT << IWM_FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
IWM_FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K |
IWM_RX_QUEUE_SIZE_LOG << IWM_FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS);
@@ -1318,7 +1444,7 @@ iwm_nic_rx_init(struct iwm_softc *sc)
* Thus sayeth el jefe (iwlwifi) via a comment:
*
* This value should initially be 0 (before preparing any
- * RBs), should be 8 after preparing the first 8 RBs (for example)
+ * RBs), should be 8 after preparing the first 8 RBs (for example)
*/
IWM_WRITE(sc, IWM_FH_RSCSR_CHNL0_WPTR, 8);
@@ -1354,6 +1480,9 @@ iwm_nic_tx_init(struct iwm_softc *sc)
qid, txq->desc,
(unsigned long) (txq->desc_dma.paddr >> 8));
}
+
+ iwm_write_prph(sc, IWM_SCD_GP_CTRL, IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE);
+
iwm_nic_unlock(sc);
return 0;
@@ -1365,7 +1494,8 @@ iwm_nic_init(struct iwm_softc *sc)
int error;
iwm_apm_init(sc);
- iwm_set_pwr(sc);
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
+ iwm_set_pwr(sc);
iwm_mvm_nic_config(sc);
@@ -1392,52 +1522,79 @@ const uint8_t iwm_mvm_ac_to_tx_fifo[] = {
IWM_MVM_TX_FIFO_BK,
};
-static void
-iwm_enable_txq(struct iwm_softc *sc, int qid, int fifo)
+static int
+iwm_enable_txq(struct iwm_softc *sc, int sta_id, int qid, int fifo)
{
if (!iwm_nic_lock(sc)) {
device_printf(sc->sc_dev,
"%s: cannot enable txq %d\n",
__func__,
qid);
- return; /* XXX return EBUSY */
+ return EBUSY;
}
- /* unactivate before configuration */
- iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
- (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
- | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+ IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
+
+ if (qid == IWM_MVM_CMD_QUEUE) {
+ /* unactivate before configuration */
+ iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
+ (0 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE)
+ | (1 << IWM_SCD_QUEUE_STTS_REG_POS_SCD_ACT_EN));
+
+ iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
+
+ iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
+
+ iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
+ /* Set scheduler window size and frame limit. */
+ iwm_write_mem32(sc,
+ sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
+ sizeof(uint32_t),
+ ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
+ IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
+ ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
+ IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
+
+ iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
+ (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
+ (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
+ (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
+ IWM_SCD_QUEUE_STTS_REG_MSK);
+ } else {
+ struct iwm_scd_txq_cfg_cmd cmd;
+ int error;
- if (qid != IWM_MVM_CMD_QUEUE) {
- iwm_set_bits_prph(sc, IWM_SCD_QUEUECHAIN_SEL, (1 << qid));
- }
+ iwm_nic_unlock(sc);
- iwm_clear_bits_prph(sc, IWM_SCD_AGGR_SEL, (1 << qid));
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.scd_queue = qid;
+ cmd.enable = 1;
+ cmd.sta_id = sta_id;
+ cmd.tx_fifo = fifo;
+ cmd.aggregate = 0;
+ cmd.window = IWM_FRAME_LIMIT;
- IWM_WRITE(sc, IWM_HBUS_TARG_WRPTR, qid << 8 | 0);
- iwm_write_prph(sc, IWM_SCD_QUEUE_RDPTR(qid), 0);
-
- iwm_write_mem32(sc, sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid), 0);
- /* Set scheduler window size and frame limit. */
- iwm_write_mem32(sc,
- sc->sched_base + IWM_SCD_CONTEXT_QUEUE_OFFSET(qid) +
- sizeof(uint32_t),
- ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
- IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
- ((IWM_FRAME_LIMIT << IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
- IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-
- iwm_write_prph(sc, IWM_SCD_QUEUE_STATUS_BITS(qid),
- (1 << IWM_SCD_QUEUE_STTS_REG_POS_ACTIVE) |
- (fifo << IWM_SCD_QUEUE_STTS_REG_POS_TXF) |
- (1 << IWM_SCD_QUEUE_STTS_REG_POS_WSL) |
- IWM_SCD_QUEUE_STTS_REG_MSK);
+ error = iwm_mvm_send_cmd_pdu(sc, IWM_SCD_QUEUE_CFG, IWM_CMD_SYNC,
+ sizeof(cmd), &cmd);
+ if (error) {
+ device_printf(sc->sc_dev,
+ "cannot enable txq %d\n", qid);
+ return error;
+ }
+
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+ }
+
+ iwm_write_prph(sc, IWM_SCD_EN_CTRL,
+ iwm_read_prph(sc, IWM_SCD_EN_CTRL) | qid);
iwm_nic_unlock(sc);
- IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
- "%s: enabled txq %d FIFO %d\n",
+ IWM_DPRINTF(sc, IWM_DEBUG_XMIT, "%s: enabled txq %d FIFO %d\n",
__func__, qid, fifo);
+
+ return 0;
}
static int
@@ -1445,16 +1602,16 @@ iwm_post_alive(struct iwm_softc *sc)
{
int nwords;
int error, chnl;
+ uint32_t base;
if (!iwm_nic_lock(sc))
return EBUSY;
- if (sc->sched_base != iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR)) {
+ base = iwm_read_prph(sc, IWM_SCD_SRAM_BASE_ADDR);
+ if (sc->sched_base != base) {
device_printf(sc->sc_dev,
- "%s: sched addr mismatch",
- __func__);
- error = EINVAL;
- goto out;
+ "%s: sched addr mismatch: alive: 0x%x prph: 0x%x\n",
+ __func__, sc->sched_base, base);
}
iwm_ict_reset(sc);
@@ -1474,8 +1631,15 @@ iwm_post_alive(struct iwm_softc *sc)
iwm_write_prph(sc, IWM_SCD_CHAINEXT_EN, 0);
+ iwm_nic_unlock(sc);
+
/* enable command channel */
- iwm_enable_txq(sc, IWM_MVM_CMD_QUEUE, 7);
+ error = iwm_enable_txq(sc, 0 /* unused */, IWM_MVM_CMD_QUEUE, 7);
+ if (error)
+ return error;
+
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
iwm_write_prph(sc, IWM_SCD_TXFACT, 0xff);
@@ -1490,11 +1654,13 @@ iwm_post_alive(struct iwm_softc *sc)
IWM_FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
/* Enable L1-Active */
- iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
- IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
+ iwm_clear_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
+ IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+ }
out:
- iwm_nic_unlock(sc);
+ iwm_nic_unlock(sc);
return error;
}
@@ -1508,17 +1674,25 @@ iwm_post_alive(struct iwm_softc *sc)
const int nvm_to_read[] = {
IWM_NVM_SECTION_TYPE_HW,
IWM_NVM_SECTION_TYPE_SW,
+ IWM_NVM_SECTION_TYPE_REGULATORY,
IWM_NVM_SECTION_TYPE_CALIBRATION,
IWM_NVM_SECTION_TYPE_PRODUCTION,
+ IWM_NVM_SECTION_TYPE_HW_8000,
+ IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
+ IWM_NVM_SECTION_TYPE_PHY_SKU,
};
/* Default NVM size to read */
-#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
-#define IWM_MAX_NVM_SECTION_SIZE 7000
+#define IWM_NVM_DEFAULT_CHUNK_SIZE (2*1024)
+#define IWM_MAX_NVM_SECTION_SIZE 8192
#define IWM_NVM_WRITE_OPCODE 1
#define IWM_NVM_READ_OPCODE 0
+/* load nvm chunk response */
+#define IWM_READ_NVM_CHUNK_SUCCEED 0
+#define IWM_READ_NVM_CHUNK_INVALID_ADDRESS 1
+
static int
iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
uint16_t offset, uint16_t length, uint8_t *data, uint16_t *len)
@@ -1538,20 +1712,24 @@ iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
IWM_CMD_SEND_IN_RFKILL,
.data = { &nvm_access_cmd, },
};
- int ret, bytes_read, offset_read;
+ int ret, offset_read;
+ size_t bytes_read;
uint8_t *resp_data;
cmd.len[0] = sizeof(struct iwm_nvm_access_cmd);
ret = iwm_send_cmd(sc, &cmd);
- if (ret)
+ if (ret) {
+ device_printf(sc->sc_dev,
+ "Could not send NVM_ACCESS command (error=%d)\n", ret);
return ret;
+ }
pkt = cmd.resp_pkt;
if (pkt->hdr.flags & IWM_CMD_FAILED_MSK) {
device_printf(sc->sc_dev,
- "%s: Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
- __func__, pkt->hdr.flags);
+ "Bad return from IWM_NVM_ACCES_COMMAND (0x%08X)\n",
+ pkt->hdr.flags);
ret = EIO;
goto exit;
}
@@ -1564,17 +1742,25 @@ iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
offset_read = le16toh(nvm_resp->offset);
resp_data = nvm_resp->data;
if (ret) {
- device_printf(sc->sc_dev,
- "%s: NVM access command failed with status %d\n",
- __func__, ret);
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "NVM access command failed with status %d\n", ret);
ret = EINVAL;
goto exit;
}
if (offset_read != offset) {
device_printf(sc->sc_dev,
- "%s: NVM ACCESS response with invalid offset %d\n",
- __func__, offset_read);
+ "NVM ACCESS response with invalid offset %d\n",
+ offset_read);
+ ret = EINVAL;
+ goto exit;
+ }
+
+ if (bytes_read > length) {
+ device_printf(sc->sc_dev,
+ "NVM ACCESS response with too much data "
+ "(%d bytes requested, %zd bytes received)\n",
+ length, bytes_read);
ret = EINVAL;
goto exit;
}
@@ -1589,7 +1775,7 @@ iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
/*
* Reads an NVM section completely.
- * NICs prior to 7000 family doesn't have a real NVM, but just read
+ * NICs prior to 7000 family don't have a real NVM, but just read
* section 0 which is the EEPROM. Because the EEPROM reading is unlimited
* by uCode, we need to manually check in this case that we don't
* overflow and try to read more than the EEPROM size.
@@ -1599,32 +1785,34 @@ iwm_nvm_read_chunk(struct iwm_softc *sc, uint16_t section,
*/
static int
iwm_nvm_read_section(struct iwm_softc *sc,
- uint16_t section, uint8_t *data, uint16_t *len)
+ uint16_t section, uint8_t *data, uint16_t *len, size_t max_len)
{
- uint16_t length, seglen;
- int error;
+ uint16_t chunklen, seglen;
+ int error = 0;
- /* Set nvm section read length */
- length = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "reading NVM section %d\n", section);
+
+ chunklen = seglen = IWM_NVM_DEFAULT_CHUNK_SIZE;
*len = 0;
- /* Read the NVM until exhausted (reading less than requested) */
- while (seglen == length) {
+ /* Read NVM chunks until exhausted (reading less than requested) */
+ while (seglen == chunklen && *len < max_len) {
error = iwm_nvm_read_chunk(sc,
- section, *len, length, data, &seglen);
+ section, *len, chunklen, data, &seglen);
if (error) {
- device_printf(sc->sc_dev,
- "Cannot read NVM from section "
- "%d offset %d, length %d\n",
- section, *len, length);
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "Cannot read from NVM section "
+ "%d at offset %d\n", section, *len);
return error;
}
*len += seglen;
}
IWM_DPRINTF(sc, IWM_DEBUG_RESET,
- "NVM section %d read completed\n", section);
- return 0;
+ "NVM section %d read completed (%d bytes, error=%d)\n",
+ section, *len, error);
+ return error;
}
/*
@@ -1634,7 +1822,7 @@ iwm_nvm_read_section(struct iwm_softc *sc,
/* iwlwifi/iwl-nvm-parse.c */
/* NVM offsets (in words) definitions */
-enum wkp_nvm_offsets {
+enum iwm_nvm_offsets {
/* NVM HW-Section offset (in words) definitions */
IWM_HW_ADDR = 0x15,
@@ -1651,6 +1839,32 @@ enum wkp_nvm_offsets {
IWM_XTAL_CALIB = 0x316 - IWM_NVM_CALIB_SECTION
};
+enum iwm_8000_nvm_offsets {
+ /* NVM HW-Section offset (in words) definitions */
+ IWM_HW_ADDR0_WFPM_8000 = 0x12,
+ IWM_HW_ADDR1_WFPM_8000 = 0x16,
+ IWM_HW_ADDR0_PCIE_8000 = 0x8A,
+ IWM_HW_ADDR1_PCIE_8000 = 0x8E,
+ IWM_MAC_ADDRESS_OVERRIDE_8000 = 1,
+
+ /* NVM SW-Section offset (in words) definitions */
+ IWM_NVM_SW_SECTION_8000 = 0x1C0,
+ IWM_NVM_VERSION_8000 = 0,
+ IWM_RADIO_CFG_8000 = 0,
+ IWM_SKU_8000 = 2,
+ IWM_N_HW_ADDRS_8000 = 3,
+
+ /* NVM REGULATORY -Section offset (in words) definitions */
+ IWM_NVM_CHANNELS_8000 = 0,
+ IWM_NVM_LAR_OFFSET_8000_OLD = 0x4C7,
+ IWM_NVM_LAR_OFFSET_8000 = 0x507,
+ IWM_NVM_LAR_ENABLED_8000 = 0x7,
+
+ /* NVM calibration section offset (in words) definitions */
+ IWM_NVM_CALIB_SECTION_8000 = 0x2B8,
+ IWM_XTAL_CALIB_8000 = 0x316 - IWM_NVM_CALIB_SECTION_8000
+};
+
/* SKU Capabilities (actual values from NVM definition) */
enum nvm_sku_bits {
IWM_NVM_SKU_CAP_BAND_24GHZ = (1 << 0),
@@ -1667,6 +1881,13 @@ enum nvm_sku_bits {
#define IWM_NVM_RF_CFG_TX_ANT_MSK(x) ((x >> 8) & 0xF) /* bits 8-11 */
#define IWM_NVM_RF_CFG_RX_ANT_MSK(x) ((x >> 12) & 0xF) /* bits 12-15 */
+#define IWM_NVM_RF_CFG_FLAVOR_MSK_8000(x) (x & 0xF)
+#define IWM_NVM_RF_CFG_DASH_MSK_8000(x) ((x >> 4) & 0xF)
+#define IWM_NVM_RF_CFG_STEP_MSK_8000(x) ((x >> 8) & 0xF)
+#define IWM_NVM_RF_CFG_TYPE_MSK_8000(x) ((x >> 12) & 0xFFF)
+#define IWM_NVM_RF_CFG_TX_ANT_MSK_8000(x) ((x >> 24) & 0xF)
+#define IWM_NVM_RF_CFG_RX_ANT_MSK_8000(x) ((x >> 28) & 0xF)
+
#define DEFAULT_MAX_TX_POWER 16
/**
@@ -1718,7 +1939,8 @@ iwm_eeprom_channel_flags(uint16_t ch_flags)
static void
iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
- int maxchans, int *nchans, int ch_idx, int ch_num, const uint8_t bands[])
+ int maxchans, int *nchans, int ch_idx, size_t ch_num,
+ const uint8_t bands[])
{
const uint16_t * const nvm_ch_flags = sc->sc_nvm.nvm_ch_flags;
uint32_t nflags;
@@ -1728,7 +1950,10 @@ iwm_add_channel_band(struct iwm_softc *sc, struct ieee80211_channel chans[],
for (; ch_idx < ch_num; ch_idx++) {
ch_flags = le16_to_cpup(nvm_ch_flags + ch_idx);
- ieee = iwm_nvm_channels[ch_idx];
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
+ ieee = iwm_nvm_channels[ch_idx];
+ else
+ ieee = iwm_nvm_channels_8000[ch_idx];
if (!(ch_flags & IWM_NVM_CHANNEL_VALID)) {
IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
@@ -1760,6 +1985,7 @@ iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
struct iwm_softc *sc = ic->ic_softc;
struct iwm_nvm_data *data = &sc->sc_nvm;
uint8_t bands[IEEE80211_MODE_BYTES];
+ size_t ch_num;
memset(bands, 0, sizeof(bands));
/* 1-13: 11b/g channels. */
@@ -1774,51 +2000,182 @@ iwm_init_channel_map(struct ieee80211com *ic, int maxchans, int *nchans,
IWM_NUM_2GHZ_CHANNELS - 1, IWM_NUM_2GHZ_CHANNELS, bands);
if (data->sku_cap_band_52GHz_enable) {
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
+ ch_num = nitems(iwm_nvm_channels);
+ else
+ ch_num = nitems(iwm_nvm_channels_8000);
memset(bands, 0, sizeof(bands));
setbit(bands, IEEE80211_MODE_11A);
iwm_add_channel_band(sc, chans, maxchans, nchans,
- IWM_NUM_2GHZ_CHANNELS, nitems(iwm_nvm_channels), bands);
+ IWM_NUM_2GHZ_CHANNELS, ch_num, bands);
}
}
+static void
+iwm_set_hw_address_8000(struct iwm_softc *sc, struct iwm_nvm_data *data,
+ const uint16_t *mac_override, const uint16_t *nvm_hw)
+{
+ const uint8_t *hw_addr;
+
+ if (mac_override) {
+ static const uint8_t reserved_mac[] = {
+ 0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+ };
+
+ hw_addr = (const uint8_t *)(mac_override +
+ IWM_MAC_ADDRESS_OVERRIDE_8000);
+
+ /*
+ * Store the MAC address from MAO section.
+ * No byte swapping is required in MAO section.
+ */
+ IEEE80211_ADDR_COPY(data->hw_addr, hw_addr);
+
+ /*
+ * Force the use of the OTP MAC address in case of reserved MAC
+ * address in the NVM, or if address is given but invalid.
+ */
+ if (!IEEE80211_ADDR_EQ(reserved_mac, hw_addr) &&
+ !IEEE80211_ADDR_EQ(ieee80211broadcastaddr, data->hw_addr) &&
+ iwm_is_valid_ether_addr(data->hw_addr) &&
+ !IEEE80211_IS_MULTICAST(data->hw_addr))
+ return;
+
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "%s: mac address from nvm override section invalid\n",
+ __func__);
+ }
+
+ if (nvm_hw) {
+ /* read the mac address from WFMP registers */
+ uint32_t mac_addr0 =
+ htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_0));
+ uint32_t mac_addr1 =
+ htole32(iwm_read_prph(sc, IWM_WFMP_MAC_ADDR_1));
+
+ hw_addr = (const uint8_t *)&mac_addr0;
+ data->hw_addr[0] = hw_addr[3];
+ data->hw_addr[1] = hw_addr[2];
+ data->hw_addr[2] = hw_addr[1];
+ data->hw_addr[3] = hw_addr[0];
+
+ hw_addr = (const uint8_t *)&mac_addr1;
+ data->hw_addr[4] = hw_addr[1];
+ data->hw_addr[5] = hw_addr[0];
+
+ return;
+ }
+
+ device_printf(sc->sc_dev, "%s: mac address not found\n", __func__);
+ memset(data->hw_addr, 0, sizeof(data->hw_addr));
+}
+
+static int
+iwm_get_sku(const struct iwm_softc *sc, const uint16_t *nvm_sw,
+ const uint16_t *phy_sku)
+{
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + IWM_SKU);
+
+ return le32_to_cpup((const uint32_t *)(phy_sku + IWM_SKU_8000));
+}
+
+static int
+iwm_get_nvm_version(const struct iwm_softc *sc, const uint16_t *nvm_sw)
+{
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
+ else
+ return le32_to_cpup((const uint32_t *)(nvm_sw +
+ IWM_NVM_VERSION_8000));
+}
+
+static int
+iwm_get_radio_cfg(const struct iwm_softc *sc, const uint16_t *nvm_sw,
+ const uint16_t *phy_sku)
+{
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
+
+ return le32_to_cpup((const uint32_t *)(phy_sku + IWM_RADIO_CFG_8000));
+}
+
+static int
+iwm_get_n_hw_addrs(const struct iwm_softc *sc, const uint16_t *nvm_sw)
+{
+ int n_hw_addr;
+
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000)
+ return le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
+
+ n_hw_addr = le32_to_cpup((const uint32_t *)(nvm_sw + IWM_N_HW_ADDRS_8000));
+
+ return n_hw_addr & IWM_N_HW_ADDR_MASK;
+}
+
+static void
+iwm_set_radio_cfg(const struct iwm_softc *sc, struct iwm_nvm_data *data,
+ uint32_t radio_cfg)
+{
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
+ data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
+ data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
+ data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
+ data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ return;
+ }
+
+ /* set the radio configuration for family 8000 */
+ data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK_8000(radio_cfg);
+ data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK_8000(radio_cfg);
+ data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK_8000(radio_cfg);
+ data->radio_cfg_pnum = IWM_NVM_RF_CFG_FLAVOR_MSK_8000(radio_cfg);
+ data->valid_tx_ant = IWM_NVM_RF_CFG_TX_ANT_MSK_8000(radio_cfg);
+ data->valid_rx_ant = IWM_NVM_RF_CFG_RX_ANT_MSK_8000(radio_cfg);
+}
+
static int
iwm_parse_nvm_data(struct iwm_softc *sc,
- const uint16_t *nvm_hw, const uint16_t *nvm_sw,
- const uint16_t *nvm_calib, uint8_t tx_chains, uint8_t rx_chains)
+ const uint16_t *nvm_hw, const uint16_t *nvm_sw,
+ const uint16_t *nvm_calib, const uint16_t *mac_override,
+ const uint16_t *phy_sku, const uint16_t *regulatory)
{
struct iwm_nvm_data *data = &sc->sc_nvm;
uint8_t hw_addr[IEEE80211_ADDR_LEN];
- uint16_t radio_cfg, sku;
+ uint32_t sku, radio_cfg;
- data->nvm_version = le16_to_cpup(nvm_sw + IWM_NVM_VERSION);
+ data->nvm_version = iwm_get_nvm_version(sc, nvm_sw);
- radio_cfg = le16_to_cpup(nvm_sw + IWM_RADIO_CFG);
- data->radio_cfg_type = IWM_NVM_RF_CFG_TYPE_MSK(radio_cfg);
- data->radio_cfg_step = IWM_NVM_RF_CFG_STEP_MSK(radio_cfg);
- data->radio_cfg_dash = IWM_NVM_RF_CFG_DASH_MSK(radio_cfg);
- data->radio_cfg_pnum = IWM_NVM_RF_CFG_PNUM_MSK(radio_cfg);
+ radio_cfg = iwm_get_radio_cfg(sc, nvm_sw, phy_sku);
+ iwm_set_radio_cfg(sc, data, radio_cfg);
- sku = le16_to_cpup(nvm_sw + IWM_SKU);
+ sku = iwm_get_sku(sc, nvm_sw, phy_sku);
data->sku_cap_band_24GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_24GHZ;
data->sku_cap_band_52GHz_enable = sku & IWM_NVM_SKU_CAP_BAND_52GHZ;
data->sku_cap_11n_enable = 0;
- data->n_hw_addrs = le16_to_cpup(nvm_sw + IWM_N_HW_ADDRS);
-
- data->xtal_calib[0] = *(nvm_calib + IWM_XTAL_CALIB);
- data->xtal_calib[1] = *(nvm_calib + IWM_XTAL_CALIB + 1);
+ data->n_hw_addrs = iwm_get_n_hw_addrs(sc, nvm_sw);
/* The byte order is little endian 16 bit, meaning 214365 */
- IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
- data->hw_addr[0] = hw_addr[1];
- data->hw_addr[1] = hw_addr[0];
- data->hw_addr[2] = hw_addr[3];
- data->hw_addr[3] = hw_addr[2];
- data->hw_addr[4] = hw_addr[5];
- data->hw_addr[5] = hw_addr[4];
-
- memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
- sizeof(data->nvm_ch_flags));
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
+ IEEE80211_ADDR_COPY(hw_addr, nvm_hw + IWM_HW_ADDR);
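+ /* (e.g. a MAC stored as b2:b1:b4:b3:b6:b5 is used as b1:b2:b3:b4:b5:b6) */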
+ data->hw_addr[0] = hw_addr[1];
+ data->hw_addr[1] = hw_addr[0];
+ data->hw_addr[2] = hw_addr[3];
+ data->hw_addr[3] = hw_addr[2];
+ data->hw_addr[4] = hw_addr[5];
+ data->hw_addr[5] = hw_addr[4];
+ } else {
+ iwm_set_hw_address_8000(sc, data, mac_override, nvm_hw);
+ }
+
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
+ memcpy(data->nvm_ch_flags, &nvm_sw[IWM_NVM_CHANNELS],
+ IWM_NUM_CHANNELS * sizeof(uint16_t));
+ } else {
+ memcpy(data->nvm_ch_flags, &regulatory[IWM_NVM_CHANNELS_8000],
+ IWM_NUM_CHANNELS_8000 * sizeof(uint16_t));
+ }
data->calib_version = 255; /* TODO:
this value will prevent some checks from
failing, we need to check if this
@@ -1832,30 +2189,61 @@ iwm_parse_nvm_data(struct iwm_softc *sc,
* END NVM PARSE
*/
-struct iwm_nvm_section {
- uint16_t length;
- uint8_t *data;
-};
-
static int
iwm_parse_nvm_sections(struct iwm_softc *sc, struct iwm_nvm_section *sections)
{
- const uint16_t *hw, *sw, *calib;
+ const uint16_t *hw, *sw, *calib, *regulatory, *mac_override, *phy_sku;
/* Checking for required sections */
- if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
- !sections[IWM_NVM_SECTION_TYPE_HW].data) {
- device_printf(sc->sc_dev,
- "%s: Can't parse empty NVM sections\n",
- __func__);
- return ENOENT;
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
+ if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
+ !sections[IWM_NVM_SECTION_TYPE_HW].data) {
+ device_printf(sc->sc_dev,
+ "Can't parse empty OTP/NVM sections\n");
+ return ENOENT;
+ }
+
+ hw = (const uint16_t *) sections[IWM_NVM_SECTION_TYPE_HW].data;
+ } else if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
+ /* SW and REGULATORY sections are mandatory */
+ if (!sections[IWM_NVM_SECTION_TYPE_SW].data ||
+ !sections[IWM_NVM_SECTION_TYPE_REGULATORY].data) {
+ device_printf(sc->sc_dev,
+ "Can't parse empty OTP/NVM sections\n");
+ return ENOENT;
+ }
+ /* MAC_OVERRIDE or at least HW section must exist */
+ if (!sections[IWM_NVM_SECTION_TYPE_HW_8000].data &&
+ !sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data) {
+ device_printf(sc->sc_dev,
+ "Can't parse mac_address, empty sections\n");
+ return ENOENT;
+ }
+
+ /* PHY_SKU section is mandatory in B0 */
+ if (!sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data) {
+ device_printf(sc->sc_dev,
+ "Can't parse phy_sku in B0, empty sections\n");
+ return ENOENT;
+ }
+
+ hw = (const uint16_t *)
+ sections[IWM_NVM_SECTION_TYPE_HW_8000].data;
+ } else {
+ panic("unknown device family %d\n", sc->sc_device_family);
}
- hw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_HW].data;
sw = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_SW].data;
- calib = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
- return iwm_parse_nvm_data(sc, hw, sw, calib,
- IWM_FW_VALID_TX_ANT(sc), IWM_FW_VALID_RX_ANT(sc));
+ calib = (const uint16_t *)
+ sections[IWM_NVM_SECTION_TYPE_CALIBRATION].data;
+ regulatory = (const uint16_t *)
+ sections[IWM_NVM_SECTION_TYPE_REGULATORY].data;
+ mac_override = (const uint16_t *)
+ sections[IWM_NVM_SECTION_TYPE_MAC_OVERRIDE].data;
+ phy_sku = (const uint16_t *)sections[IWM_NVM_SECTION_TYPE_PHY_SKU].data;
+
+ return iwm_parse_nvm_data(sc, hw, sw, calib, mac_override,
+ phy_sku, regulatory);
}
static int
@@ -1864,38 +2252,34 @@ iwm_nvm_init(struct iwm_softc *sc)
struct iwm_nvm_section nvm_sections[IWM_NVM_NUM_OF_SECTIONS];
int i, section, error;
uint16_t len;
- uint8_t *nvm_buffer, *temp;
+ uint8_t *buf;
+ const size_t bufsz = IWM_MAX_NVM_SECTION_SIZE;
- /* Read From FW NVM */
- IWM_DPRINTF(sc, IWM_DEBUG_EEPROM,
- "%s: Read NVM\n",
- __func__);
+ memset(nvm_sections, 0, sizeof(nvm_sections));
- memset(nvm_sections, 0, sizeof(nvm_sections));
+ buf = malloc(bufsz, M_DEVBUF, M_NOWAIT);
+ if (buf == NULL)
+ return ENOMEM;
- /* TODO: find correct NVM max size for a section */
- nvm_buffer = malloc(IWM_OTP_LOW_IMAGE_SIZE, M_DEVBUF, M_NOWAIT);
- if (nvm_buffer == NULL)
- return (ENOMEM);
for (i = 0; i < nitems(nvm_to_read); i++) {
section = nvm_to_read[i];
KASSERT(section < nitems(nvm_sections),
("too many sections"));
- error = iwm_nvm_read_section(sc, section, nvm_buffer, &len);
- if (error)
- break;
-
- temp = malloc(len, M_DEVBUF, M_NOWAIT);
- if (temp == NULL) {
+ error = iwm_nvm_read_section(sc, section, buf, &len, bufsz);
+ if (error) {
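+ /* Missing sections are tolerated; mandatory ones are checked at parse time. */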
+ error = 0;
+ continue;
+ }
+ nvm_sections[section].data = malloc(len, M_DEVBUF, M_NOWAIT);
+ if (nvm_sections[section].data == NULL) {
error = ENOMEM;
break;
}
- memcpy(temp, nvm_buffer, len);
- nvm_sections[section].data = temp;
+ memcpy(nvm_sections[section].data, buf, len);
nvm_sections[section].length = len;
}
- free(nvm_buffer, M_DEVBUF);
+ free(buf, M_DEVBUF);
if (error == 0)
error = iwm_parse_nvm_sections(sc, nvm_sections);
@@ -1913,21 +2297,52 @@ iwm_nvm_init(struct iwm_softc *sc)
*/
static int
-iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
+iwm_firmware_load_sect(struct iwm_softc *sc, uint32_t dst_addr,
const uint8_t *section, uint32_t byte_cnt)
{
+ int error = EINVAL;
+ uint32_t chunk_sz, offset;
+
+ chunk_sz = MIN(IWM_FH_MEM_TB_MAX_LENGTH, byte_cnt);
+
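+ /* Feed the section to the device in chunks the FH DMA engine accepts. */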
+ for (offset = 0; offset < byte_cnt; offset += chunk_sz) {
+ uint32_t addr, len;
+ const uint8_t *data;
+
+ addr = dst_addr + offset;
+ len = MIN(chunk_sz, byte_cnt - offset);
+ data = section + offset;
+
+ error = iwm_firmware_load_chunk(sc, addr, data, len);
+ if (error)
+ break;
+ }
+
+ return error;
+}
+
+static int
+iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
+ const uint8_t *chunk, uint32_t byte_cnt)
+{
struct iwm_dma_info *dma = &sc->fw_dma;
int error;
- /* Copy firmware section into pre-allocated DMA-safe memory. */
- memcpy(dma->vaddr, section, byte_cnt);
+ /* Copy firmware chunk into pre-allocated DMA-safe memory. */
+ memcpy(dma->vaddr, chunk, byte_cnt);
bus_dmamap_sync(dma->tag, dma->map, BUS_DMASYNC_PREWRITE);
- if (!iwm_nic_lock(sc))
- return EBUSY;
+ if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWM_FW_MEM_EXTENDED_END) {
+ iwm_set_bits_prph(sc, IWM_LMPM_CHICK,
+ IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
+ }
sc->sc_fw_chunk_done = 0;
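+ /* The FH-Tx interrupt handler sets sc_fw_chunk_done when the DMA completes. */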
+ if (!iwm_nic_lock(sc))
+ return EBUSY;
+
IWM_WRITE(sc, IWM_FH_TCSR_CHNL_TX_CONFIG_REG(IWM_FH_SRVC_CHNL),
IWM_FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);
IWM_WRITE(sc, IWM_FH_SRVC_CHNL_SRAM_ADDR_REG(IWM_FH_SRVC_CHNL),
@@ -1953,14 +2368,133 @@ iwm_firmware_load_chunk(struct iwm_softc *sc, uint32_t dst_addr,
if ((error = msleep(&sc->sc_fw, &sc->sc_mtx, 0, "iwmfw", hz)) != 0)
break;
+ if (!sc->sc_fw_chunk_done) {
+ device_printf(sc->sc_dev,
+ "fw chunk addr 0x%x len %d failed to load\n",
+ dst_addr, byte_cnt);
+ }
+
+ if (dst_addr >= IWM_FW_MEM_EXTENDED_START &&
+ dst_addr <= IWM_FW_MEM_EXTENDED_END && iwm_nic_lock(sc)) {
+ iwm_clear_bits_prph(sc, IWM_LMPM_CHICK,
+ IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE);
+ iwm_nic_unlock(sc);
+ }
+
return error;
}
+int
+iwm_load_cpu_sections_8000(struct iwm_softc *sc, struct iwm_fw_sects *fws,
+ int cpu, int *first_ucode_section)
+{
+ int shift_param;
+ int i, error = 0, sec_num = 0x1;
+ uint32_t val, last_read_idx = 0;
+ const void *data;
+ uint32_t dlen;
+ uint32_t offset;
+
+ if (cpu == 1) {
+ shift_param = 0;
+ *first_ucode_section = 0;
+ } else {
+ shift_param = 16;
+ (*first_ucode_section)++;
+ }
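+ /* CPU1 section bits go in the low halfword of the load status, CPU2 in the high. */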
+
+ for (i = *first_ucode_section; i < IWM_UCODE_SECT_MAX; i++) {
+ last_read_idx = i;
+ data = fws->fw_sect[i].fws_data;
+ dlen = fws->fw_sect[i].fws_len;
+ offset = fws->fw_sect[i].fws_devoff;
+
+ /*
+ * The CPU1_CPU2_SEPARATOR_SECTION delimiter separates the CPU1
+ * sections from the CPU2 sections.
+ * The PAGING_SEPARATOR_SECTION delimiter separates the CPU2
+ * non-paged sections from the CPU2 paging sections.
+ */
+ if (!data || offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
+ offset == IWM_PAGING_SEPARATOR_SECTION)
+ break;
+
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "LOAD FIRMWARE chunk %d offset 0x%x len %d for cpu %d\n",
+ i, offset, dlen, cpu);
+
+ if (dlen > sc->sc_fwdmasegsz) {
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "chunk %d too large (%d bytes)\n", i, dlen);
+ error = EFBIG;
+ } else {
+ error = iwm_firmware_load_sect(sc, offset, data, dlen);
+ }
+ if (error) {
+ device_printf(sc->sc_dev,
+ "could not load firmware chunk %d (error %d)\n",
+ i, error);
+ return error;
+ }
+
+ /* Notify the ucode of the loaded section number and status */
+ if (iwm_nic_lock(sc)) {
+ val = IWM_READ(sc, IWM_FH_UCODE_LOAD_STATUS);
+ val = val | (sec_num << shift_param);
+ IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, val);
+ sec_num = (sec_num << 1) | 0x1;
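+ /* (sec_num accumulates a ones-mask: 0x1, 0x3, 0x7, ...) */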
+ iwm_nic_unlock(sc);
+
+ /*
+ * The firmware won't load correctly without this delay.
+ */
+ DELAY(8000);
+ }
+ }
+
+ *first_ucode_section = last_read_idx;
+
+ if (iwm_nic_lock(sc)) {
+ if (cpu == 1)
+ IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFF);
+ else
+ IWM_WRITE(sc, IWM_FH_UCODE_LOAD_STATUS, 0xFFFFFFFF);
+ iwm_nic_unlock(sc);
+ }
+
+ return 0;
+}
+
+int
+iwm_load_firmware_8000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
+{
+ struct iwm_fw_sects *fws;
+ int error = 0;
+ int first_ucode_section;
+
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET, "loading ucode type %d\n",
+ ucode_type);
+
+ fws = &sc->sc_fw.fw_sects[ucode_type];
+
+ /* configure the ucode to be ready to get the secured image */
+ /* release CPU reset */
+ iwm_write_prph(sc, IWM_RELEASE_CPU_RESET, IWM_RELEASE_CPU_RESET_BIT);
+
+ /* load to FW the binary Secured sections of CPU1 */
+ error = iwm_load_cpu_sections_8000(sc, fws, 1, &first_ucode_section);
+ if (error)
+ return error;
+
+ /* load to FW the binary sections of CPU2 */
+ return iwm_load_cpu_sections_8000(sc, fws, 2, &first_ucode_section);
+}
+
static int
-iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
+iwm_load_firmware_7000(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
{
struct iwm_fw_sects *fws;
- int error, i, w;
+ int error, i;
const void *data;
uint32_t dlen;
uint32_t offset;
@@ -1975,21 +2509,57 @@ iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
"LOAD FIRMWARE type %d offset %u len %d\n",
ucode_type, offset, dlen);
- error = iwm_firmware_load_chunk(sc, offset, data, dlen);
+ if (dlen > sc->sc_fwdmasegsz) {
+ IWM_DPRINTF(sc, IWM_DEBUG_FIRMWARE_TLV,
+ "chunk %d too large (%d bytes)\n", i, dlen);
+ error = EFBIG;
+ } else {
+ error = iwm_firmware_load_sect(sc, offset, data, dlen);
+ }
if (error) {
device_printf(sc->sc_dev,
- "%s: chunk %u of %u returned error %02d\n",
- __func__, i, fws->fw_count, error);
+ "could not load firmware chunk %u of %u "
+ "(error=%d)\n", i, fws->fw_count, error);
return error;
}
}
- /* wait for the firmware to load */
IWM_WRITE(sc, IWM_CSR_RESET, 0);
+ return 0;
+}
+
+static int
+iwm_load_firmware(struct iwm_softc *sc, enum iwm_ucode_type ucode_type)
+{
+ int error, w;
+
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
+ error = iwm_load_firmware_8000(sc, ucode_type);
+ else
+ error = iwm_load_firmware_7000(sc, ucode_type);
+ if (error)
+ return error;
+
+ /* wait for the firmware to load */
for (w = 0; !sc->sc_uc.uc_intr && w < 10; w++) {
error = msleep(&sc->sc_uc, &sc->sc_mtx, 0, "iwmuc", hz/10);
}
+ if (error || !sc->sc_uc.uc_ok) {
+ device_printf(sc->sc_dev, "could not load firmware\n");
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
+ device_printf(sc->sc_dev, "cpu1 status: 0x%x\n",
+ iwm_read_prph(sc, IWM_SB_CPU_1_STATUS));
+ device_printf(sc->sc_dev, "cpu2 status: 0x%x\n",
+ iwm_read_prph(sc, IWM_SB_CPU_2_STATUS));
+ }
+ }
+
+ /*
+ * Give the firmware some time to initialize.
+ * Accessing it too early causes errors.
+ */
+ msleep(&w, &sc->sc_mtx, 0, "iwmfwinit", hz);
return error;
}
@@ -2063,17 +2633,25 @@ iwm_mvm_load_ucode_wait_alive(struct iwm_softc *sc,
enum iwm_ucode_type old_type = sc->sc_uc_current;
int error;
- if ((error = iwm_read_firmware(sc, ucode_type)) != 0)
+ if ((error = iwm_read_firmware(sc, ucode_type)) != 0) {
+ device_printf(sc->sc_dev, "iwm_read_firmware: failed %d\n",
+ error);
return error;
+ }
sc->sc_uc_current = ucode_type;
error = iwm_start_fw(sc, ucode_type);
if (error) {
+ device_printf(sc->sc_dev, "iwm_start_fw: failed %d\n", error);
sc->sc_uc_current = old_type;
return error;
}
- return iwm_post_alive(sc);
+ error = iwm_post_alive(sc);
+ if (error) {
+ device_printf(sc->sc_dev, "iwm_fw_alive: failed %d\n", error);
+ }
+ return error;
}
/*
@@ -2109,26 +2687,31 @@ iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
}
IEEE80211_ADDR_COPY(sc->sc_ic.ic_macaddr, sc->sc_nvm.hw_addr);
- sc->sc_scan_cmd_len = sizeof(struct iwm_scan_cmd)
- + sc->sc_capa_max_probe_len
- + IWM_MAX_NUM_SCAN_CHANNELS
- * sizeof(struct iwm_scan_channel);
- sc->sc_scan_cmd = malloc(sc->sc_scan_cmd_len, M_DEVBUF,
- M_NOWAIT);
- if (sc->sc_scan_cmd == NULL)
- return (ENOMEM);
-
return 0;
}
+ if ((error = iwm_send_bt_init_conf(sc)) != 0) {
+ device_printf(sc->sc_dev,
+ "failed to send bt coex configuration: %d\n", error);
+ return error;
+ }
+
+ /* Init Smart FIFO. */
+ error = iwm_mvm_sf_config(sc, IWM_SF_INIT_OFF);
+ if (error != 0)
+ return error;
+
/* Send TX valid antennas before triggering calibrations */
- if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
+ if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
+ device_printf(sc->sc_dev,
+ "failed to send antennas before calibration: %d\n", error);
return error;
+ }
/*
- * Send phy configurations command to init uCode
- * to start the 16.0 uCode init image internal calibrations.
- */
+ * Send phy configurations command to init uCode
+ * to start the 16.0 uCode init image internal calibrations.
+ */
if ((error = iwm_send_phy_cfg_cmd(sc)) != 0 ) {
device_printf(sc->sc_dev,
"%s: failed to run internal calibration: %d\n",
@@ -2140,10 +2723,18 @@ iwm_run_init_mvm_ucode(struct iwm_softc *sc, int justnvm)
* Nothing to do but wait for the init complete notification
* from the firmware
*/
- while (!sc->sc_init_complete)
- if ((error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
- 0, "iwminit", 2*hz)) != 0)
+ while (!sc->sc_init_complete) {
+ error = msleep(&sc->sc_init_complete, &sc->sc_mtx,
+ 0, "iwminit", 2*hz);
+ if (error) {
+ device_printf(sc->sc_dev, "init complete failed: %d\n",
+ sc->sc_init_complete);
break;
+ }
+ }
+
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET, "init %scomplete\n",
+ sc->sc_init_complete ? "" : "not ");
return error;
}
@@ -2160,18 +2751,17 @@ iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
struct iwm_rx_data *data = &ring->data[idx];
struct mbuf *m;
bus_dmamap_t dmamap = NULL;
- int error;
- bus_addr_t paddr;
+ bus_dma_segment_t seg;
+ int nsegs, error;
m = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, IWM_RBUF_SIZE);
if (m == NULL)
return ENOBUFS;
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
- error = bus_dmamap_load(ring->data_dmat, ring->spare_map,
- mtod(m, void *), IWM_RBUF_SIZE, iwm_dma_map_addr,
- &paddr, BUS_DMA_NOWAIT);
- if (error != 0 && error != EFBIG) {
+ error = bus_dmamap_load_mbuf_sg(ring->data_dmat, ring->spare_map, m,
+ &seg, &nsegs, BUS_DMA_NOWAIT);
+ if (error != 0) {
device_printf(sc->sc_dev,
"%s: can't map mbuf, error %d\n", __func__, error);
goto fail;
@@ -2189,13 +2779,14 @@ iwm_rx_addbuf(struct iwm_softc *sc, int size, int idx)
data->m = m;
/* Update RX descriptor. */
- ring->desc[idx] = htole32(paddr >> 8);
+ KASSERT((seg.ds_addr & 255) == 0, ("seg.ds_addr not aligned"));
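+ /* The hardware takes the receive buffer address in 256-byte units. */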
+ ring->desc[idx] = htole32(seg.ds_addr >> 8);
bus_dmamap_sync(ring->desc_dma.tag, ring->desc_dma.map,
BUS_DMASYNC_PREWRITE);
return 0;
fail:
- m_free(m);
+ m_freem(m);
return error;
}
@@ -2750,7 +3341,7 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
flags |= IWM_TX_CMD_FLG_ACK;
}
- if (type != IEEE80211_FC0_TYPE_DATA
+ if (type == IEEE80211_FC0_TYPE_DATA
&& (totlen + IEEE80211_CRC_LEN > vap->iv_rtsthreshold)
&& !IEEE80211_IS_MULTICAST(wh->i_addr1)) {
flags |= IWM_TX_CMD_FLG_PROT_REQUIRE;
@@ -2766,12 +3357,15 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
uint8_t subtype = wh->i_fc[0] & IEEE80211_FC0_SUBTYPE_MASK;
if (subtype == IEEE80211_FC0_SUBTYPE_ASSOC_REQ ||
- subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ)
- tx->pm_frame_timeout = htole16(3);
- else
- tx->pm_frame_timeout = htole16(2);
+ subtype == IEEE80211_FC0_SUBTYPE_REASSOC_REQ) {
+ tx->pm_frame_timeout = htole16(IWM_PM_FRAME_ASSOC);
+ } else if (subtype == IEEE80211_FC0_SUBTYPE_ACTION) {
+ tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
+ } else {
+ tx->pm_frame_timeout = htole16(IWM_PM_FRAME_MGMT);
+ }
} else {
- tx->pm_frame_timeout = htole16(0);
+ tx->pm_frame_timeout = htole16(IWM_PM_FRAME_NONE);
}
if (hdrlen & 3) {
@@ -2839,11 +3433,11 @@ iwm_tx(struct iwm_softc *sc, struct mbuf *m, struct ieee80211_node *ni, int ac)
KASSERT(data->in != NULL, ("node is NULL"));
IWM_DPRINTF(sc, IWM_DEBUG_XMIT,
- "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%d\n",
+ "sending data: qid=%d idx=%d len=%d nsegs=%d txflags=0x%08x rate_n_flags=0x%08x rateidx=%u\n",
ring->qid, ring->cur, totlen, nsegs,
le32toh(tx->tx_flags),
le32toh(tx->rate_n_flags),
- (int) tx->initial_rate_index
+ tx->initial_rate_index
);
/* Fill TX descriptor. */
@@ -2956,52 +3550,19 @@ iwm_mvm_flush_tx_path(struct iwm_softc *sc, int tfd_msk, int sync)
* BEGIN mvm/sta.c
*/
-static void
-iwm_mvm_add_sta_cmd_v6_to_v5(struct iwm_mvm_add_sta_cmd_v6 *cmd_v6,
- struct iwm_mvm_add_sta_cmd_v5 *cmd_v5)
-{
- memset(cmd_v5, 0, sizeof(*cmd_v5));
-
- cmd_v5->add_modify = cmd_v6->add_modify;
- cmd_v5->tid_disable_tx = cmd_v6->tid_disable_tx;
- cmd_v5->mac_id_n_color = cmd_v6->mac_id_n_color;
- IEEE80211_ADDR_COPY(cmd_v5->addr, cmd_v6->addr);
- cmd_v5->sta_id = cmd_v6->sta_id;
- cmd_v5->modify_mask = cmd_v6->modify_mask;
- cmd_v5->station_flags = cmd_v6->station_flags;
- cmd_v5->station_flags_msk = cmd_v6->station_flags_msk;
- cmd_v5->add_immediate_ba_tid = cmd_v6->add_immediate_ba_tid;
- cmd_v5->remove_immediate_ba_tid = cmd_v6->remove_immediate_ba_tid;
- cmd_v5->add_immediate_ba_ssn = cmd_v6->add_immediate_ba_ssn;
- cmd_v5->sleep_tx_count = cmd_v6->sleep_tx_count;
- cmd_v5->sleep_state_flags = cmd_v6->sleep_state_flags;
- cmd_v5->assoc_id = cmd_v6->assoc_id;
- cmd_v5->beamform_flags = cmd_v6->beamform_flags;
- cmd_v5->tfd_queue_msk = cmd_v6->tfd_queue_msk;
-}
-
static int
iwm_mvm_send_add_sta_cmd_status(struct iwm_softc *sc,
- struct iwm_mvm_add_sta_cmd_v6 *cmd, int *status)
+ struct iwm_mvm_add_sta_cmd_v7 *cmd, int *status)
{
- struct iwm_mvm_add_sta_cmd_v5 cmd_v5;
-
- if (sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_STA_KEY_CMD) {
- return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA,
- sizeof(*cmd), cmd, status);
- }
-
- iwm_mvm_add_sta_cmd_v6_to_v5(cmd, &cmd_v5);
-
- return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(cmd_v5),
- &cmd_v5, status);
+ return iwm_mvm_send_cmd_pdu_status(sc, IWM_ADD_STA, sizeof(*cmd),
+ cmd, status);
}
/* send station add/update command to firmware */
static int
iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
{
- struct iwm_mvm_add_sta_cmd_v6 add_sta_cmd;
+ struct iwm_mvm_add_sta_cmd_v7 add_sta_cmd;
int ret;
uint32_t status;
@@ -3012,12 +3573,19 @@ iwm_mvm_sta_send_to_fw(struct iwm_softc *sc, struct iwm_node *in, int update)
= htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID,
IWM_DEFAULT_COLOR));
if (!update) {
- add_sta_cmd.tfd_queue_msk = htole32(0xf);
+ int ac;
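+ /* Build the TFD queue mask from the per-AC Tx FIFO mapping. */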
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ add_sta_cmd.tfd_queue_msk |=
+ htole32(1 << iwm_mvm_ac_to_tx_fifo[ac]);
+ }
IEEE80211_ADDR_COPY(&add_sta_cmd.addr, in->in_ni.ni_bssid);
}
add_sta_cmd.add_modify = update ? 1 : 0;
add_sta_cmd.station_flags_msk
|= htole32(IWM_STA_FLG_FAT_EN_MSK | IWM_STA_FLG_MIMO_EN_MSK);
+ add_sta_cmd.tid_disable_tx = htole16(0xffff);
+ if (update)
+ add_sta_cmd.modify_mask |= (IWM_STA_MODIFY_TID_DISABLE_TX);
status = IWM_ADD_STA_SUCCESS;
ret = iwm_mvm_send_add_sta_cmd_status(sc, &add_sta_cmd, &status);
@@ -3052,7 +3620,7 @@ static int
iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
const uint8_t *addr, uint16_t mac_id, uint16_t color)
{
- struct iwm_mvm_add_sta_cmd_v6 cmd;
+ struct iwm_mvm_add_sta_cmd_v7 cmd;
int ret;
uint32_t status;
@@ -3061,6 +3629,7 @@ iwm_mvm_add_int_sta_common(struct iwm_softc *sc, struct iwm_int_sta *sta,
cmd.mac_id_n_color = htole32(IWM_FW_CMD_ID_AND_COLOR(mac_id, color));
cmd.tfd_queue_msk = htole32(sta->tfd_queue_msk);
+ cmd.tid_disable_tx = htole16(0xffff);
if (addr)
IEEE80211_ADDR_COPY(cmd.addr, addr);
@@ -3089,8 +3658,12 @@ iwm_mvm_add_aux_sta(struct iwm_softc *sc)
{
int ret;
- sc->sc_aux_sta.sta_id = 3;
- sc->sc_aux_sta.tfd_queue_msk = 0;
+ sc->sc_aux_sta.sta_id = IWM_AUX_STA_ID;
+ sc->sc_aux_sta.tfd_queue_msk = (1 << IWM_MVM_AUX_QUEUE);
+
+ ret = iwm_enable_txq(sc, 0, IWM_MVM_AUX_QUEUE, IWM_MVM_TX_FIFO_MCAST);
+ if (ret)
+ return ret;
ret = iwm_mvm_add_int_sta_common(sc,
&sc->sc_aux_sta, NULL, IWM_MAC_INDEX_AUX, 0);
@@ -3210,6 +3783,10 @@ iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
in->in_assoc = 0;
+ error = iwm_mvm_sf_config(sc, IWM_SF_FULL_ON);
+ if (error != 0)
+ return error;
+
error = iwm_allow_mcast(vap, sc);
if (error) {
device_printf(sc->sc_dev,
@@ -3222,7 +3799,7 @@ iwm_auth(struct ieee80211vap *vap, struct iwm_softc *sc)
*
* Linux iwlwifi doesn't reset the nic each time, nor does it
* call ctxt_add() here. Instead, it adds it during vap creation,
- * and always does does a mac_ctx_changed().
+ * and always does a mac_ctx_changed().
*
* The openbsd port doesn't attempt to do that - it reset things
* at odd states and does the add here.
@@ -3342,7 +3919,7 @@ iwm_release(struct iwm_softc *sc, struct iwm_node *in)
* back to nothing anyway, we'll just do a complete device reset.
* Up your's, device!
*/
- //iwm_mvm_flush_tx_path(sc, 0xf, 1);
+ /* iwm_mvm_flush_tx_path(sc, 0xf, 1); */
iwm_stop_device(sc);
iwm_init_hw(sc);
if (in)
@@ -3448,6 +4025,10 @@ iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
memset(lq, 0, sizeof(*lq));
lq->sta_id = IWM_STATION_ID;
+ /* For HT, always enable RTS/CTS to avoid excessive retries. */
+ if (ni->ni_flags & IEEE80211_NODE_HT)
+ lq->flags |= IWM_LQ_FLAG_USE_RTS_MSK;
+
/*
* are these used? (we don't do SISO or MIMO)
* need to set them to non-zero, though, or we get an error.
@@ -3471,7 +4052,7 @@ iwm_setrates(struct iwm_softc *sc, struct iwm_node *in)
int nextant;
if (txant == 0)
- txant = IWM_FW_VALID_TX_ANT(sc);
+ txant = iwm_fw_valid_tx_ant(sc);
nextant = 1<<(ffs(txant)-1);
txant &= ~nextant;
@@ -3567,7 +4148,12 @@ iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
"Force transition to INIT; MGT=%d\n", arg);
IWM_UNLOCK(sc);
IEEE80211_LOCK(ic);
- vap->iv_newstate(vap, IEEE80211_S_INIT, arg);
+ /* Always pass arg as -1 since we can't Tx right now. */
+ /*
+ * XXX arg is just ignored anyway when transitioning
+ * to IEEE80211_S_INIT.
+ */
+ vap->iv_newstate(vap, IEEE80211_S_INIT, -1);
IWM_DPRINTF(sc, IWM_DEBUG_STATE,
"Going INIT->SCAN\n");
nstate = IEEE80211_S_SCAN;
@@ -3578,7 +4164,6 @@ iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
switch (nstate) {
case IEEE80211_S_INIT:
- sc->sc_scanband = 0;
break;
case IEEE80211_S_AUTH:
@@ -3630,6 +4215,7 @@ iwm_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg)
"%s: IWM_LQ_CMD failed\n", __func__);
}
+ iwm_mvm_led_enable(sc);
break;
}
@@ -3647,46 +4233,258 @@ iwm_endscan_cb(void *arg, int pending)
{
struct iwm_softc *sc = arg;
struct ieee80211com *ic = &sc->sc_ic;
- int done;
- int error;
IWM_DPRINTF(sc, IWM_DEBUG_SCAN | IWM_DEBUG_TRACE,
"%s: scan ended\n",
__func__);
- IWM_LOCK(sc);
- if (sc->sc_scanband == IEEE80211_CHAN_2GHZ &&
- sc->sc_nvm.sku_cap_band_52GHz_enable) {
- done = 0;
- if ((error = iwm_mvm_scan_request(sc,
- IEEE80211_CHAN_5GHZ, 0, NULL, 0)) != 0) {
- device_printf(sc->sc_dev,
- "could not initiate 5 GHz scan\n");
- done = 1;
+ ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
+}
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in default configuration
+ */
+static const uint32_t
+iwm_sf_full_timeout_def[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
+ {
+ htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF),
+ htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWM_SF_AGG_UNICAST_AGING_TIMER_DEF),
+ htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWM_SF_MCAST_AGING_TIMER_DEF),
+ htole32(IWM_SF_MCAST_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWM_SF_BA_AGING_TIMER_DEF),
+ htole32(IWM_SF_BA_IDLE_TIMER_DEF)
+ },
+ {
+ htole32(IWM_SF_TX_RE_AGING_TIMER_DEF),
+ htole32(IWM_SF_TX_RE_IDLE_TIMER_DEF)
+ },
+};
+
+/*
+ * Aging and idle timeouts for the different possible scenarios
+ * in single BSS MAC configuration.
+ */
+static const uint32_t
+iwm_sf_full_timeout[IWM_SF_NUM_SCENARIO][IWM_SF_NUM_TIMEOUT_TYPES] = {
+ {
+ htole32(IWM_SF_SINGLE_UNICAST_AGING_TIMER),
+ htole32(IWM_SF_SINGLE_UNICAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWM_SF_AGG_UNICAST_AGING_TIMER),
+ htole32(IWM_SF_AGG_UNICAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWM_SF_MCAST_AGING_TIMER),
+ htole32(IWM_SF_MCAST_IDLE_TIMER)
+ },
+ {
+ htole32(IWM_SF_BA_AGING_TIMER),
+ htole32(IWM_SF_BA_IDLE_TIMER)
+ },
+ {
+ htole32(IWM_SF_TX_RE_AGING_TIMER),
+ htole32(IWM_SF_TX_RE_IDLE_TIMER)
+ },
+};
+
+static void
+iwm_mvm_fill_sf_command(struct iwm_softc *sc, struct iwm_sf_cfg_cmd *sf_cmd,
+ struct ieee80211_node *ni)
+{
+ int i, j, watermark;
+
+ sf_cmd->watermark[IWM_SF_LONG_DELAY_ON] = htole32(IWM_SF_W_MARK_SCAN);
+
+ /*
+ * If we are in association flow - check antenna configuration
+ * capabilities of the AP station, and choose the watermark accordingly.
+ */
+ if (ni) {
+ if (ni->ni_flags & IEEE80211_NODE_HT) {
+#ifdef notyet
+ if (ni->ni_rxmcs[2] != 0)
+ watermark = IWM_SF_W_MARK_MIMO3;
+ else if (ni->ni_rxmcs[1] != 0)
+ watermark = IWM_SF_W_MARK_MIMO2;
+ else
+#endif
+ watermark = IWM_SF_W_MARK_SISO;
+ } else {
+ watermark = IWM_SF_W_MARK_LEGACY;
}
+ /* default watermark value for unassociated mode. */
} else {
- done = 1;
+ watermark = IWM_SF_W_MARK_MIMO2;
}
+ sf_cmd->watermark[IWM_SF_FULL_ON] = htole32(watermark);
- if (done) {
- IWM_UNLOCK(sc);
- ieee80211_scan_done(TAILQ_FIRST(&ic->ic_vaps));
- IWM_LOCK(sc);
- sc->sc_scanband = 0;
+ for (i = 0; i < IWM_SF_NUM_SCENARIO; i++) {
+ for (j = 0; j < IWM_SF_NUM_TIMEOUT_TYPES; j++) {
+ sf_cmd->long_delay_timeouts[i][j] =
+ htole32(IWM_SF_LONG_DELAY_AGING_TIMER);
+ }
+ }
+
+ if (ni) {
+ memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout,
+ sizeof(iwm_sf_full_timeout));
+ } else {
+ memcpy(sf_cmd->full_on_timeouts, iwm_sf_full_timeout_def,
+ sizeof(iwm_sf_full_timeout_def));
+ }
+}
+
+static int
+iwm_mvm_sf_config(struct iwm_softc *sc, enum iwm_sf_state new_state)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct iwm_sf_cfg_cmd sf_cmd = {
+ .state = htole32(IWM_SF_FULL_ON),
+ };
+ int ret = 0;
+
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
+ sf_cmd.state |= htole32(IWM_SF_CFG_DUMMY_NOTIF_OFF);
+
+ switch (new_state) {
+ case IWM_SF_UNINIT:
+ case IWM_SF_INIT_OFF:
+ iwm_mvm_fill_sf_command(sc, &sf_cmd, NULL);
+ break;
+ case IWM_SF_FULL_ON:
+ iwm_mvm_fill_sf_command(sc, &sf_cmd, vap->iv_bss);
+ break;
+ default:
+ IWM_DPRINTF(sc, IWM_DEBUG_PWRSAVE,
+ "Invalid state: %d. not sending Smart Fifo cmd\n",
+ new_state);
+ return EINVAL;
+ }
+
+ ret = iwm_mvm_send_cmd_pdu(sc, IWM_REPLY_SF_CFG_CMD, IWM_CMD_ASYNC,
+ sizeof(sf_cmd), &sf_cmd);
+ return ret;
+}
+
+static int
+iwm_send_bt_init_conf(struct iwm_softc *sc)
+{
+ struct iwm_bt_coex_cmd bt_cmd;
+
+ bt_cmd.mode = htole32(IWM_BT_COEX_WIFI);
+ bt_cmd.enabled_modules = htole32(IWM_BT_COEX_HIGH_BAND_RET);
+
+ return iwm_mvm_send_cmd_pdu(sc, IWM_BT_CONFIG, 0, sizeof(bt_cmd),
+ &bt_cmd);
+}
+
+static int
+iwm_send_update_mcc_cmd(struct iwm_softc *sc, const char *alpha2)
+{
+ struct iwm_mcc_update_cmd mcc_cmd;
+ struct iwm_host_cmd hcmd = {
+ .id = IWM_MCC_UPDATE_CMD,
+ .flags = (IWM_CMD_SYNC | IWM_CMD_WANT_SKB),
+ .data = { &mcc_cmd },
+ };
+ int ret;
+#ifdef IWM_DEBUG
+ struct iwm_rx_packet *pkt;
+ struct iwm_mcc_update_resp_v1 *mcc_resp_v1 = NULL;
+ struct iwm_mcc_update_resp *mcc_resp;
+ int n_channels;
+ uint16_t mcc;
+#endif
+ int resp_v2 = isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2);
+
+ memset(&mcc_cmd, 0, sizeof(mcc_cmd));
+ mcc_cmd.mcc = htole16(alpha2[0] << 8 | alpha2[1]);
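+ /* e.g. "ZZ" packs to 0x5a5a; the world domain "00" is 0x3030. */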
+ if ((sc->sc_ucode_api & IWM_UCODE_TLV_API_WIFI_MCC_UPDATE) ||
+ isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC))
+ mcc_cmd.source_id = IWM_MCC_SOURCE_GET_CURRENT;
+ else
+ mcc_cmd.source_id = IWM_MCC_SOURCE_OLD_FW;
+
+ if (resp_v2)
+ hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd);
+ else
+ hcmd.len[0] = sizeof(struct iwm_mcc_update_cmd_v1);
+
+ IWM_DPRINTF(sc, IWM_DEBUG_NODE,
+ "send MCC update to FW with '%c%c' src = %d\n",
+ alpha2[0], alpha2[1], mcc_cmd.source_id);
+
+ ret = iwm_send_cmd(sc, &hcmd);
+ if (ret)
+ return ret;
+
+#ifdef IWM_DEBUG
+ pkt = hcmd.resp_pkt;
+
+ /* Extract MCC response */
+ if (resp_v2) {
+ mcc_resp = (void *)pkt->data;
+ mcc = mcc_resp->mcc;
+ n_channels = le32toh(mcc_resp->n_channels);
+ } else {
+ mcc_resp_v1 = (void *)pkt->data;
+ mcc = mcc_resp_v1->mcc;
+ n_channels = le32toh(mcc_resp_v1->n_channels);
+ }
+
+ /* W/A for a FW/NVM issue - returns 0x00 for the world domain */
+ if (mcc == 0)
+ mcc = 0x3030; /* "00" - world */
+
+ IWM_DPRINTF(sc, IWM_DEBUG_NODE,
+ "regulatory domain '%c%c' (%d channels available)\n",
+ mcc >> 8, mcc & 0xff, n_channels);
+#endif
+ iwm_free_resp(sc, &hcmd);
+
+ return 0;
+}
+
+static void
+iwm_mvm_tt_tx_backoff(struct iwm_softc *sc, uint32_t backoff)
+{
+ struct iwm_host_cmd cmd = {
+ .id = IWM_REPLY_THERMAL_MNG_BACKOFF,
+ .len = { sizeof(uint32_t), },
+ .data = { &backoff, },
+ };
+
+ if (iwm_send_cmd(sc, &cmd) != 0) {
+ device_printf(sc->sc_dev,
+ "failed to change thermal tx backoff\n");
}
- IWM_UNLOCK(sc);
}
static int
iwm_init_hw(struct iwm_softc *sc)
{
struct ieee80211com *ic = &sc->sc_ic;
- int error, i, qid;
+ int error, i, ac;
- if ((error = iwm_start_hw(sc)) != 0)
+ if ((error = iwm_start_hw(sc)) != 0) {
+ printf("iwm_start_hw: failed %d\n", error);
return error;
+ }
if ((error = iwm_run_init_mvm_ucode(sc, 0)) != 0) {
+ printf("iwm_run_init_mvm_ucode: failed %d\n", error);
return error;
}
@@ -3707,19 +4505,32 @@ iwm_init_hw(struct iwm_softc *sc)
goto error;
}
- if ((error = iwm_send_tx_ant_cfg(sc, IWM_FW_VALID_TX_ANT(sc))) != 0)
+ if ((error = iwm_send_bt_init_conf(sc)) != 0) {
+ device_printf(sc->sc_dev, "bt init conf failed\n");
goto error;
+ }
+
+ if ((error = iwm_send_tx_ant_cfg(sc, iwm_fw_valid_tx_ant(sc))) != 0) {
+ device_printf(sc->sc_dev, "antenna config failed\n");
+ goto error;
+ }
/* Send phy db control command and then phy db calibration*/
- if ((error = iwm_send_phy_db_data(sc)) != 0)
+ if ((error = iwm_send_phy_db_data(sc)) != 0) {
+ device_printf(sc->sc_dev, "phy_db_data failed\n");
goto error;
+ }
- if ((error = iwm_send_phy_cfg_cmd(sc)) != 0)
+ if ((error = iwm_send_phy_cfg_cmd(sc)) != 0) {
+ device_printf(sc->sc_dev, "phy_cfg_cmd failed\n");
goto error;
+ }
/* Add auxiliary station for scanning */
- if ((error = iwm_mvm_add_aux_sta(sc)) != 0)
+ if ((error = iwm_mvm_add_aux_sta(sc)) != 0) {
+ device_printf(sc->sc_dev, "add_aux_sta failed\n");
goto error;
+ }
for (i = 0; i < IWM_NUM_PHY_CTX; i++) {
/*
@@ -3732,13 +4543,35 @@ iwm_init_hw(struct iwm_softc *sc)
goto error;
}
+ /* Initialize tx backoffs to the minimum. */
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000)
+ iwm_mvm_tt_tx_backoff(sc, 0);
+
error = iwm_mvm_power_update_device(sc);
if (error)
goto error;
- /* Mark TX rings as active. */
- for (qid = 0; qid < 4; qid++) {
- iwm_enable_txq(sc, qid, qid);
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_LAR_SUPPORT)) {
+ if ((error = iwm_send_update_mcc_cmd(sc, "ZZ")) != 0)
+ goto error;
+ }
+
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN)) {
+ if ((error = iwm_mvm_config_umac_scan(sc)) != 0)
+ goto error;
+ }
+
+ /* Enable Tx queues. */
+ for (ac = 0; ac < WME_NUM_AC; ac++) {
+ error = iwm_enable_txq(sc, IWM_STATION_ID, ac,
+ iwm_mvm_ac_to_tx_fifo[ac]);
+ if (error)
+ goto error;
+ }
+
+ if ((error = iwm_mvm_disable_beacon_filter(sc)) != 0) {
+ device_printf(sc->sc_dev, "failed to disable beacon filter\n");
+ goto error;
}
return 0;
@@ -3774,6 +4607,10 @@ iwm_allow_mcast(struct ieee80211vap *vap, struct iwm_softc *sc)
return (error);
}
+/*
+ * ifnet interfaces
+ */
+
static void
iwm_init(struct iwm_softc *sc)
{
@@ -3786,12 +4623,13 @@ iwm_init(struct iwm_softc *sc)
sc->sc_flags &= ~IWM_FLAG_STOPPED;
if ((error = iwm_init_hw(sc)) != 0) {
+ printf("iwm_init_hw failed %d\n", error);
iwm_stop(sc);
return;
}
/*
- * Ok, firmware loaded and we are jogging
+ * Ok, firmware loaded and we are jogging
*/
sc->sc_flags |= IWM_FLAG_HW_INITED;
callout_reset(&sc->sc_watchdog_to, hz, iwm_watchdog, sc);
@@ -3852,7 +4690,6 @@ iwm_stop(struct iwm_softc *sc)
sc->sc_flags &= ~IWM_FLAG_HW_INITED;
sc->sc_flags |= IWM_FLAG_STOPPED;
sc->sc_generation++;
- sc->sc_scanband = 0;
iwm_led_blink_stop(sc);
sc->sc_tx_timer = 0;
iwm_stop_device(sc);
@@ -3871,7 +4708,7 @@ iwm_watchdog(void *arg)
iwm_nic_error(sc);
#endif
ieee80211_restart_all(ic);
- counter_u64_add(ic->ic_oerrors, 1);
+ counter_u64_add(sc->sc_ic.ic_oerrors, 1);
return;
}
}
@@ -3914,8 +4751,8 @@ iwm_parent(struct ieee80211com *ic)
struct iwm_error_event_table {
uint32_t valid; /* (nonzero) valid, (0) log is empty */
uint32_t error_id; /* type of error */
- uint32_t pc; /* program counter */
- uint32_t blink1; /* branch link */
+ uint32_t trm_hw_status0; /* TRM HW status */
+ uint32_t trm_hw_status1; /* TRM HW status */
uint32_t blink2; /* branch link */
uint32_t ilink1; /* interrupt link */
uint32_t ilink2; /* interrupt link */
@@ -3927,8 +4764,9 @@ struct iwm_error_event_table {
uint32_t tsf_hi; /* network timestamp function timer */
uint32_t gp1; /* GP1 timer register */
uint32_t gp2; /* GP2 timer register */
- uint32_t gp3; /* GP3 timer register */
- uint32_t ucode_ver; /* uCode version */
+ uint32_t fw_rev_type; /* firmware revision type */
+ uint32_t major; /* uCode version major */
+ uint32_t minor; /* uCode version minor */
uint32_t hw_ver; /* HW Silicon version */
uint32_t brd_ver; /* HW board version */
uint32_t log_pc; /* log program counter */
@@ -3945,7 +4783,7 @@ struct iwm_error_event_table {
* time_flag */
uint32_t isr4; /* isr status register LMPM_NIC_ISR4:
* wico interrupt */
- uint32_t isr_pref; /* isr status register LMPM_NIC_PREF_STAT */
+ uint32_t last_cmd_id; /* last HCMD id handled by the firmware */
uint32_t wait_event; /* wait event() caller address */
uint32_t l2p_control; /* L2pControlField */
uint32_t l2p_duration; /* L2pDurationField */
@@ -3956,6 +4794,31 @@ struct iwm_error_event_table {
uint32_t u_timestamp; /* date and time of the compilation */
uint32_t flow_handler; /* FH read/write pointers, RX credit */
+} __packed /* LOG_ERROR_TABLE_API_S_VER_3 */;
+
+/*
+ * UMAC error struct - relevant starting from family 8000 chip.
+ * Note: This structure is read from the device with IO accesses,
+ * and the reading already does the endian conversion. As it is
+ * read with u32-sized accesses, any members with a different size
+ * need to be ordered correctly though!
+ */
+struct iwm_umac_error_event_table {
+ uint32_t valid; /* (nonzero) valid, (0) log is empty */
+ uint32_t error_id; /* type of error */
+ uint32_t blink1; /* branch link */
+ uint32_t blink2; /* branch link */
+ uint32_t ilink1; /* interrupt link */
+ uint32_t ilink2; /* interrupt link */
+ uint32_t data1; /* error-specific data */
+ uint32_t data2; /* error-specific data */
+ uint32_t data3; /* error-specific data */
+ uint32_t umac_major;
+ uint32_t umac_minor;
+ uint32_t frame_pointer; /* core register 27 */
+ uint32_t stack_pointer; /* core register 28 */
+ uint32_t cmd_header; /* latest host cmd sent to UMAC */
+ uint32_t nic_isr_pref; /* ISR status register */
} __packed;
#define ERROR_START_OFFSET (1 * sizeof(uint32_t))
@@ -3997,6 +4860,53 @@ iwm_desc_lookup(uint32_t num)
return advanced_lookup[i].name;
}
+static void
+iwm_nic_umac_error(struct iwm_softc *sc)
+{
+ struct iwm_umac_error_event_table table;
+ uint32_t base;
+
+ base = sc->sc_uc.uc_umac_error_event_table;
+
+ if (base < 0x800000) {
+ device_printf(sc->sc_dev, "Invalid error log pointer 0x%08x\n",
+ base);
+ return;
+ }
+
+ if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
+ device_printf(sc->sc_dev, "reading errlog failed\n");
+ return;
+ }
+
+ if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
+ device_printf(sc->sc_dev, "Start UMAC Error Log Dump:\n");
+ device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
+ sc->sc_flags, table.valid);
+ }
+
+ device_printf(sc->sc_dev, "0x%08X | %s\n", table.error_id,
+ iwm_desc_lookup(table.error_id));
+ device_printf(sc->sc_dev, "0x%08X | umac branchlink1\n", table.blink1);
+ device_printf(sc->sc_dev, "0x%08X | umac branchlink2\n", table.blink2);
+ device_printf(sc->sc_dev, "0x%08X | umac interruptlink1\n",
+ table.ilink1);
+ device_printf(sc->sc_dev, "0x%08X | umac interruptlink2\n",
+ table.ilink2);
+ device_printf(sc->sc_dev, "0x%08X | umac data1\n", table.data1);
+ device_printf(sc->sc_dev, "0x%08X | umac data2\n", table.data2);
+ device_printf(sc->sc_dev, "0x%08X | umac data3\n", table.data3);
+ device_printf(sc->sc_dev, "0x%08X | umac major\n", table.umac_major);
+ device_printf(sc->sc_dev, "0x%08X | umac minor\n", table.umac_minor);
+ device_printf(sc->sc_dev, "0x%08X | frame pointer\n",
+ table.frame_pointer);
+ device_printf(sc->sc_dev, "0x%08X | stack pointer\n",
+ table.stack_pointer);
+ device_printf(sc->sc_dev, "0x%08X | last host cmd\n", table.cmd_header);
+ device_printf(sc->sc_dev, "0x%08X | isr status reg\n",
+ table.nic_isr_pref);
+}
+
/*
* Support for dumping the error log seemed like a good idea ...
* but it's mostly hex junk and the only sensible thing is the
@@ -4012,13 +4922,13 @@ iwm_nic_error(struct iwm_softc *sc)
device_printf(sc->sc_dev, "dumping device error log\n");
base = sc->sc_uc.uc_error_event_table;
- if (base < 0x800000 || base >= 0x80C000) {
+ if (base < 0x800000) {
device_printf(sc->sc_dev,
- "Not valid error log pointer 0x%08x\n", base);
+ "Invalid error log pointer 0x%08x\n", base);
return;
}
- if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t)) != 0) {
+ if (iwm_read_mem(sc, base, &table, sizeof(table)/sizeof(uint32_t))) {
device_printf(sc->sc_dev, "reading errlog failed\n");
return;
}
@@ -4029,15 +4939,17 @@ iwm_nic_error(struct iwm_softc *sc)
}
if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
- device_printf(sc->sc_dev, "Start IWL Error Log Dump:\n");
+ device_printf(sc->sc_dev, "Start Error Log Dump:\n");
device_printf(sc->sc_dev, "Status: 0x%x, count: %d\n",
sc->sc_flags, table.valid);
}
device_printf(sc->sc_dev, "0x%08X | %-28s\n", table.error_id,
- iwm_desc_lookup(table.error_id));
- device_printf(sc->sc_dev, "%08X | uPc\n", table.pc);
- device_printf(sc->sc_dev, "%08X | branchlink1\n", table.blink1);
+ iwm_desc_lookup(table.error_id));
+ device_printf(sc->sc_dev, "%08X | trm_hw_status0\n",
+ table.trm_hw_status0);
+ device_printf(sc->sc_dev, "%08X | trm_hw_status1\n",
+ table.trm_hw_status1);
device_printf(sc->sc_dev, "%08X | branchlink2\n", table.blink2);
device_printf(sc->sc_dev, "%08X | interruptlink1\n", table.ilink1);
device_printf(sc->sc_dev, "%08X | interruptlink2\n", table.ilink2);
@@ -4049,8 +4961,10 @@ iwm_nic_error(struct iwm_softc *sc)
device_printf(sc->sc_dev, "%08X | tsf hi\n", table.tsf_hi);
device_printf(sc->sc_dev, "%08X | time gp1\n", table.gp1);
device_printf(sc->sc_dev, "%08X | time gp2\n", table.gp2);
- device_printf(sc->sc_dev, "%08X | time gp3\n", table.gp3);
- device_printf(sc->sc_dev, "%08X | uCode version\n", table.ucode_ver);
+ device_printf(sc->sc_dev, "%08X | uCode revision type\n",
+ table.fw_rev_type);
+ device_printf(sc->sc_dev, "%08X | uCode version major\n", table.major);
+ device_printf(sc->sc_dev, "%08X | uCode version minor\n", table.minor);
device_printf(sc->sc_dev, "%08X | hw version\n", table.hw_ver);
device_printf(sc->sc_dev, "%08X | board version\n", table.brd_ver);
device_printf(sc->sc_dev, "%08X | hcmd\n", table.hcmd);
@@ -4059,7 +4973,7 @@ iwm_nic_error(struct iwm_softc *sc)
device_printf(sc->sc_dev, "%08X | isr2\n", table.isr2);
device_printf(sc->sc_dev, "%08X | isr3\n", table.isr3);
device_printf(sc->sc_dev, "%08X | isr4\n", table.isr4);
- device_printf(sc->sc_dev, "%08X | isr_pref\n", table.isr_pref);
+ device_printf(sc->sc_dev, "%08X | last cmd Id\n", table.last_cmd_id);
device_printf(sc->sc_dev, "%08X | wait_event\n", table.wait_event);
device_printf(sc->sc_dev, "%08X | l2p_control\n", table.l2p_control);
device_printf(sc->sc_dev, "%08X | l2p_duration\n", table.l2p_duration);
@@ -4068,6 +4982,9 @@ iwm_nic_error(struct iwm_softc *sc)
device_printf(sc->sc_dev, "%08X | lmpm_pmg_sel\n", table.lmpm_pmg_sel);
device_printf(sc->sc_dev, "%08X | timestamp\n", table.u_timestamp);
device_printf(sc->sc_dev, "%08X | flow_handler\n", table.flow_handler);
+
+ if (sc->sc_uc.uc_umac_error_event_table)
+ iwm_nic_umac_error(sc);
}
#endif
@@ -4092,18 +5009,23 @@ do { \
static void
iwm_notif_intr(struct iwm_softc *sc)
{
+ struct ieee80211com *ic = &sc->sc_ic;
uint16_t hw;
bus_dmamap_sync(sc->rxq.stat_dma.tag, sc->rxq.stat_dma.map,
BUS_DMASYNC_POSTREAD);
hw = le16toh(sc->rxq.stat->closed_rb_num) & 0xfff;
+
+ /*
+ * Process responses
+ */
while (sc->rxq.cur != hw) {
struct iwm_rx_ring *ring = &sc->rxq;
struct iwm_rx_data *data = &sc->rxq.data[sc->rxq.cur];
struct iwm_rx_packet *pkt;
struct iwm_cmd_response *cresp;
- int qid, idx;
+ int qid, idx, code;
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
@@ -4112,10 +5034,10 @@ iwm_notif_intr(struct iwm_softc *sc)
qid = pkt->hdr.qid & ~0x80;
idx = pkt->hdr.idx;
+ code = IWM_WIDE_ID(pkt->hdr.flags, pkt->hdr.code);
IWM_DPRINTF(sc, IWM_DEBUG_INTR,
- "rx packet qid=%d idx=%d flags=%x type=%x %d %d\n",
- pkt->hdr.qid & ~0x80, pkt->hdr.idx, pkt->hdr.flags,
- pkt->hdr.code, sc->rxq.cur, hw);
+ "rx packet qid=%d idx=%d type=%x %d %d\n",
+ pkt->hdr.qid & ~0x80, pkt->hdr.idx, code, sc->rxq.cur, hw);
/*
* randomly get these from the firmware, no idea why.
@@ -4127,7 +5049,7 @@ iwm_notif_intr(struct iwm_softc *sc)
continue;
}
- switch (pkt->hdr.code) {
+ switch (code) {
case IWM_REPLY_RX_PHY_CMD:
iwm_mvm_rx_rx_phy_cmd(sc, pkt, data);
break;
@@ -4145,7 +5067,6 @@ iwm_notif_intr(struct iwm_softc *sc)
int missed;
/* XXX look at mac_id to determine interface ID */
- struct ieee80211com *ic = &sc->sc_ic;
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
SYNC_RESP_STRUCT(resp, pkt);
@@ -4179,16 +5100,56 @@ iwm_notif_intr(struct iwm_softc *sc)
break; }
+ case IWM_MFUART_LOAD_NOTIFICATION:
+ break;
+
case IWM_MVM_ALIVE: {
- struct iwm_mvm_alive_resp *resp;
- SYNC_RESP_STRUCT(resp, pkt);
+ struct iwm_mvm_alive_resp_v1 *resp1;
+ struct iwm_mvm_alive_resp_v2 *resp2;
+ struct iwm_mvm_alive_resp_v3 *resp3;
+
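+ /* The alive response layout is versioned; tell the versions apart by payload length. */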
+ if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp1)) {
+ SYNC_RESP_STRUCT(resp1, pkt);
+ sc->sc_uc.uc_error_event_table
+ = le32toh(resp1->error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table
+ = le32toh(resp1->log_event_table_ptr);
+ sc->sched_base = le32toh(resp1->scd_base_ptr);
+ if (resp1->status == IWM_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ else
+ sc->sc_uc.uc_ok = 0;
+ }
- sc->sc_uc.uc_error_event_table
- = le32toh(resp->error_event_table_ptr);
- sc->sc_uc.uc_log_event_table
- = le32toh(resp->log_event_table_ptr);
- sc->sched_base = le32toh(resp->scd_base_ptr);
- sc->sc_uc.uc_ok = resp->status == IWM_ALIVE_STATUS_OK;
+ if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp2)) {
+ SYNC_RESP_STRUCT(resp2, pkt);
+ sc->sc_uc.uc_error_event_table
+ = le32toh(resp2->error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table
+ = le32toh(resp2->log_event_table_ptr);
+ sc->sched_base = le32toh(resp2->scd_base_ptr);
+ sc->sc_uc.uc_umac_error_event_table
+ = le32toh(resp2->error_info_addr);
+ if (resp2->status == IWM_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ else
+ sc->sc_uc.uc_ok = 0;
+ }
+
+ if (iwm_rx_packet_payload_len(pkt) == sizeof(*resp3)) {
+ SYNC_RESP_STRUCT(resp3, pkt);
+ sc->sc_uc.uc_error_event_table
+ = le32toh(resp3->error_event_table_ptr);
+ sc->sc_uc.uc_log_event_table
+ = le32toh(resp3->log_event_table_ptr);
+ sc->sched_base = le32toh(resp3->scd_base_ptr);
+ sc->sc_uc.uc_umac_error_event_table
+ = le32toh(resp3->error_info_addr);
+ if (resp3->status == IWM_ALIVE_STATUS_OK)
+ sc->sc_uc.uc_ok = 1;
+ else
+ sc->sc_uc.uc_ok = 0;
+ }
sc->sc_uc.uc_intr = 1;
wakeup(&sc->sc_uc);
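/*
 * Editor's note: the three version-specific IWM_MVM_ALIVE branches
 * above differ only in which response struct they read; a minimal
 * sketch of a shared helper (hypothetical, not part of this commit)
 * for the fields common to all three versions:
 */
static void
iwm_mvm_alive_common(struct iwm_softc *sc, uint32_t error_ptr,
    uint32_t log_ptr, uint32_t scd_base_ptr, uint8_t status)
{
	sc->sc_uc.uc_error_event_table = le32toh(error_ptr);
	sc->sc_uc.uc_log_event_table = le32toh(log_ptr);
	sc->sched_base = le32toh(scd_base_ptr);
	sc->sc_uc.uc_ok = (status == IWM_ALIVE_STATUS_OK);
}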
@@ -4210,6 +5171,7 @@ iwm_notif_intr(struct iwm_softc *sc)
break; }
case IWM_NVM_ACCESS_CMD:
+ case IWM_MCC_UPDATE_CMD:
if (sc->sc_wantresp == ((qid << 16) | idx)) {
bus_dmamap_sync(sc->rxq.data_dmat, data->map,
BUS_DMASYNC_POSTREAD);
@@ -4218,6 +5180,21 @@ iwm_notif_intr(struct iwm_softc *sc)
}
break;
+ case IWM_MCC_CHUB_UPDATE_CMD: {
+ struct iwm_mcc_chub_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ sc->sc_fw_mcc[0] = (notif->mcc & 0xff00) >> 8;
+ sc->sc_fw_mcc[1] = notif->mcc & 0xff;
+ sc->sc_fw_mcc[2] = '\0';
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "fw source %d sent CC '%s'\n",
+ notif->source_id, sc->sc_fw_mcc);
+ break; }
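/*
 * Editor's note: the firmware's MCC word packs a two-letter ISO 3166
 * country code, high byte first; e.g. 0x5553 decodes to "US".  A
 * stand-alone sketch of the decoding done in the case above:
 */
static void
iwm_mcc_decode(uint16_t mcc, char cc[3])
{
	cc[0] = (mcc & 0xff00) >> 8;	/* 0x55 -> 'U' */
	cc[1] = mcc & 0x00ff;		/* 0x53 -> 'S' */
	cc[2] = '\0';
}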
+
+ case IWM_DTS_MEASUREMENT_NOTIFICATION:
+ break;
+
case IWM_PHY_CONFIGURATION_CMD:
case IWM_TX_ANT_CONFIGURATION_CMD:
case IWM_ADD_STA:
@@ -4228,12 +5205,17 @@ iwm_notif_intr(struct iwm_softc *sc)
case IWM_BINDING_CONTEXT_CMD:
case IWM_TIME_EVENT_CMD:
case IWM_SCAN_REQUEST_CMD:
+ case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_CFG_CMD):
+ case IWM_WIDE_ID(IWM_ALWAYS_LONG_GROUP, IWM_SCAN_REQ_UMAC):
+ case IWM_SCAN_OFFLOAD_REQUEST_CMD:
case IWM_REPLY_BEACON_FILTERING_CMD:
case IWM_MAC_PM_POWER_TABLE:
case IWM_TIME_QUOTA_CMD:
case IWM_REMOVE_STA:
case IWM_TXPATH_FLUSH:
case IWM_LQ_CMD:
+ case IWM_BT_CONFIG:
+ case IWM_REPLY_THERMAL_MNG_BACKOFF:
SYNC_RESP_STRUCT(cresp, pkt);
if (sc->sc_wantresp == ((qid << 16) | idx)) {
memcpy(sc->sc_cmd_resp,
@@ -4250,11 +5232,42 @@ iwm_notif_intr(struct iwm_softc *sc)
wakeup(&sc->sc_init_complete);
break;
- case IWM_SCAN_COMPLETE_NOTIFICATION: {
- struct iwm_scan_complete_notif *notif;
+ case IWM_SCAN_OFFLOAD_COMPLETE: {
+ struct iwm_periodic_scan_complete *notif;
SYNC_RESP_STRUCT(notif, pkt);
+ break;
+ }
+
+ case IWM_SCAN_ITERATION_COMPLETE: {
+ struct iwm_lmac_scan_complete_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
+ break;
+ }
+
+ case IWM_SCAN_COMPLETE_UMAC: {
+ struct iwm_umac_scan_complete *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
+ "UMAC scan complete, status=0x%x\n",
+ notif->status);
+#if 0 /* XXX This would be a duplicate scan end call */
taskqueue_enqueue(sc->sc_tq, &sc->sc_es_task);
- break; }
+#endif
+ break;
+ }
+
+ case IWM_SCAN_ITERATION_COMPLETE_UMAC: {
+ struct iwm_umac_scan_iter_complete_notif *notif;
+ SYNC_RESP_STRUCT(notif, pkt);
+
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "UMAC scan iteration "
+ "complete, status=0x%x, %d channels scanned\n",
+ notif->status, notif->scanned_channels);
+ ieee80211_runtask(&sc->sc_ic, &sc->sc_es_task);
+ break;
+ }
case IWM_REPLY_ERROR: {
struct iwm_error_resp *resp;
@@ -4264,7 +5277,8 @@ iwm_notif_intr(struct iwm_softc *sc)
"firmware error 0x%x, cmd 0x%x\n",
le32toh(resp->error_type),
resp->cmd_id);
- break; }
+ break;
+ }
case IWM_TIME_EVENT_NOTIFICATION: {
struct iwm_time_event_notif *notif;
@@ -4272,12 +5286,25 @@ iwm_notif_intr(struct iwm_softc *sc)
IWM_DPRINTF(sc, IWM_DEBUG_INTR,
"TE notif status = 0x%x action = 0x%x\n",
- notif->status, notif->action);
- break; }
+ notif->status, notif->action);
+ break;
+ }
case IWM_MCAST_FILTER_CMD:
break;
+ case IWM_SCD_QUEUE_CFG: {
+ struct iwm_scd_txq_cfg_rsp *rsp;
+ SYNC_RESP_STRUCT(rsp, pkt);
+
+ IWM_DPRINTF(sc, IWM_DEBUG_CMD,
+ "queue cfg token=0x%x sta_id=%d "
+ "tid=%d scd_queue=%d\n",
+ rsp->token, rsp->sta_id, rsp->tid,
+ rsp->scd_queue);
+ break;
+ }
+
default:
device_printf(sc->sc_dev,
"frame %d/%d %x UNHANDLED (this should "
@@ -4480,10 +5507,14 @@ iwm_intr(void *arg)
#define PCI_VENDOR_INTEL 0x8086
#define PCI_PRODUCT_INTEL_WL_3160_1 0x08b3
#define PCI_PRODUCT_INTEL_WL_3160_2 0x08b4
+#define PCI_PRODUCT_INTEL_WL_3165_1 0x3165
+#define PCI_PRODUCT_INTEL_WL_3165_2 0x3166
#define PCI_PRODUCT_INTEL_WL_7260_1 0x08b1
#define PCI_PRODUCT_INTEL_WL_7260_2 0x08b2
#define PCI_PRODUCT_INTEL_WL_7265_1 0x095a
#define PCI_PRODUCT_INTEL_WL_7265_2 0x095b
+#define PCI_PRODUCT_INTEL_WL_8260_1 0x24f3
+#define PCI_PRODUCT_INTEL_WL_8260_2 0x24f4
static const struct iwm_devices {
uint16_t device;
@@ -4491,10 +5522,14 @@ static const struct iwm_devices {
} iwm_devices[] = {
{ PCI_PRODUCT_INTEL_WL_3160_1, "Intel Dual Band Wireless AC 3160" },
{ PCI_PRODUCT_INTEL_WL_3160_2, "Intel Dual Band Wireless AC 3160" },
+ { PCI_PRODUCT_INTEL_WL_3165_1, "Intel Dual Band Wireless AC 3165" },
+ { PCI_PRODUCT_INTEL_WL_3165_2, "Intel Dual Band Wireless AC 3165" },
{ PCI_PRODUCT_INTEL_WL_7260_1, "Intel Dual Band Wireless AC 7260" },
{ PCI_PRODUCT_INTEL_WL_7260_2, "Intel Dual Band Wireless AC 7260" },
{ PCI_PRODUCT_INTEL_WL_7265_1, "Intel Dual Band Wireless AC 7265" },
{ PCI_PRODUCT_INTEL_WL_7265_2, "Intel Dual Band Wireless AC 7265" },
+ { PCI_PRODUCT_INTEL_WL_8260_1, "Intel Dual Band Wireless AC 8260" },
+ { PCI_PRODUCT_INTEL_WL_8260_2, "Intel Dual Band Wireless AC 8260" },
};
static int
@@ -4502,12 +5537,13 @@ iwm_probe(device_t dev)
{
int i;
- for (i = 0; i < nitems(iwm_devices); i++)
+ for (i = 0; i < nitems(iwm_devices); i++) {
if (pci_get_vendor(dev) == PCI_VENDOR_INTEL &&
pci_get_device(dev) == iwm_devices[i].device) {
device_set_desc(dev, iwm_devices[i].name);
return (BUS_PROBE_DEFAULT);
}
+ }
return (ENXIO);
}
@@ -4519,21 +5555,42 @@ iwm_dev_check(device_t dev)
sc = device_get_softc(dev);
+ sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
switch (pci_get_device(dev)) {
case PCI_PRODUCT_INTEL_WL_3160_1:
case PCI_PRODUCT_INTEL_WL_3160_2:
sc->sc_fwname = "iwm3160fw";
sc->host_interrupt_operation_mode = 1;
+ sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
+ return (0);
+ case PCI_PRODUCT_INTEL_WL_3165_1:
+ case PCI_PRODUCT_INTEL_WL_3165_2:
+ sc->sc_fwname = "iwm7265fw";
+ sc->host_interrupt_operation_mode = 0;
+ sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
return (0);
case PCI_PRODUCT_INTEL_WL_7260_1:
case PCI_PRODUCT_INTEL_WL_7260_2:
sc->sc_fwname = "iwm7260fw";
sc->host_interrupt_operation_mode = 1;
+ sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
return (0);
case PCI_PRODUCT_INTEL_WL_7265_1:
case PCI_PRODUCT_INTEL_WL_7265_2:
sc->sc_fwname = "iwm7265fw";
sc->host_interrupt_operation_mode = 0;
+ sc->sc_device_family = IWM_DEVICE_FAMILY_7000;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
+ return (0);
+ case PCI_PRODUCT_INTEL_WL_8260_1:
+ case PCI_PRODUCT_INTEL_WL_8260_2:
+ sc->sc_fwname = "iwm8000Cfw";
+ sc->host_interrupt_operation_mode = 0;
+ sc->sc_device_family = IWM_DEVICE_FAMILY_8000;
+ sc->sc_fwdmasegsz = IWM_FWDMASEGSZ_8000;
return (0);
default:
device_printf(dev, "unknown adapter type\n");
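/*
 * Editor's note: every case above sets the same four softc fields; a
 * table-driven sketch of the same data (hypothetical struct and table
 * names, not part of this commit):
 */
struct iwm_cfg_sketch {
	uint16_t	device;
	const char	*fwname;
	int		host_int_op_mode;
	int		device_family;
	uint32_t	fwdmasegsz;
};

static const struct iwm_cfg_sketch iwm_cfg_table[] = {
	{ PCI_PRODUCT_INTEL_WL_7265_1, "iwm7265fw", 0,
	    IWM_DEVICE_FAMILY_7000, IWM_FWDMASEGSZ },
	{ PCI_PRODUCT_INTEL_WL_8260_1, "iwm8000Cfw", 0,
	    IWM_DEVICE_FAMILY_8000, IWM_FWDMASEGSZ_8000 },
	/* ... one entry per supported device ... */
};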
@@ -4627,14 +5684,6 @@ iwm_attach(device_t dev)
callout_init_mtx(&sc->sc_watchdog_to, &sc->sc_mtx, 0);
callout_init_mtx(&sc->sc_led_blink_to, &sc->sc_mtx, 0);
TASK_INIT(&sc->sc_es_task, 0, iwm_endscan_cb, sc);
- sc->sc_tq = taskqueue_create("iwm_taskq", M_WAITOK,
- taskqueue_thread_enqueue, &sc->sc_tq);
- error = taskqueue_start_threads(&sc->sc_tq, 1, 0, "iwm_taskq");
- if (error != 0) {
- device_printf(dev, "can't start threads, error %d\n",
- error);
- goto fail;
- }
/* PCI attach */
error = iwm_pci_attach(dev);
@@ -4648,17 +5697,62 @@ iwm_attach(device_t dev)
if (error != 0)
goto fail;
- sc->sc_fwdmasegsz = IWM_FWDMASEGSZ;
-
/*
* We now start fiddling with the hardware
*/
- sc->sc_hw_rev = IWM_READ(sc, IWM_CSR_HW_REV);
+ /*
+	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
+	 * changed, and the revision step now also includes bits 0-1 (there
+	 * is no more "dash" value). To keep hw_rev backwards compatible, we
+	 * store it in the old format.
+ */
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
+ sc->sc_hw_rev = (sc->sc_hw_rev & 0xfff0) |
+ (IWM_CSR_HW_REV_STEP(sc->sc_hw_rev << 2) << 2);
+
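/*
 * Editor's note: a minimal sketch of the conversion above, assuming
 * IWM_CSR_HW_REV_STEP() extracts bits 2-3 (as iwlwifi's
 * CSR_HW_REV_STEP does).  The 8000-family step lives in bits 0-1, so
 * the expression moves it back into the old "step" position:
 */
static inline uint32_t
iwm_8000_hw_rev_to_old(uint32_t rev)
{
	/* e.g. 0x0201 (step in bits 0-1) becomes 0x0204 (step in bits 2-3) */
	return ((rev & 0xfff0) | ((rev & 0x3) << 2));
}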
if (iwm_prepare_card_hw(sc) != 0) {
device_printf(dev, "could not initialize hardware\n");
goto fail;
}
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000) {
+ int ret;
+ uint32_t hw_step;
+
+ /*
+ * In order to recognize C step the driver should read the
+ * chip version id located at the AUX bus MISC address.
+ */
+ IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
+ DELAY(2);
+
+ ret = iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
+ 25000);
+ if (ret < 0) {
+ device_printf(sc->sc_dev,
+ "Failed to wake up the nic\n");
+ goto fail;
+ }
+
+ if (iwm_nic_lock(sc)) {
+ hw_step = iwm_read_prph(sc, IWM_WFPM_CTRL_REG);
+ hw_step |= IWM_ENABLE_WFPM;
+ iwm_write_prph(sc, IWM_WFPM_CTRL_REG, hw_step);
+ hw_step = iwm_read_prph(sc, IWM_AUX_MISC_REG);
+ hw_step = (hw_step >> IWM_HW_STEP_LOCATION_BITS) & 0xF;
+ if (hw_step == 0x3)
+ sc->sc_hw_rev = (sc->sc_hw_rev & 0xFFFFFFF3) |
+ (IWM_SILICON_C_STEP << 2);
+ iwm_nic_unlock(sc);
+ } else {
+ device_printf(sc->sc_dev, "Failed to lock the nic\n");
+ goto fail;
+ }
+ }
+
/* Allocate DMA memory for firmware transfers. */
if ((error = iwm_alloc_fwmem(sc)) != 0) {
device_printf(dev, "could not allocate memory for firmware\n");
@@ -4752,6 +5846,17 @@ fail:
}
static int
+iwm_is_valid_ether_addr(uint8_t *addr)
+{
+ char zero_addr[IEEE80211_ADDR_LEN] = { 0, 0, 0, 0, 0, 0 };
+
+ if ((addr[0] & 1) || IEEE80211_ADDR_EQ(zero_addr, addr))
+ return (FALSE);
+
+ return (TRUE);
+}
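/*
 * Editor's note: quick illustration of the check above --
 * 00:00:00:00:00:00 fails the zero-address test, 01:00:5e:00:00:01
 * fails the group-bit test (addr[0] & 1), and an ordinary unicast
 * address such as 00:24:d7:12:34:56 passes.
 */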
+
+static int
iwm_update_edca(struct ieee80211com *ic)
{
struct iwm_softc *sc = ic->ic_softc;
@@ -4785,11 +5890,9 @@ iwm_preinit(void *arg)
goto fail;
}
device_printf(dev,
- "revision: 0x%x, firmware %d.%d (API ver. %d)\n",
+ "hw rev 0x%x, fw ver %s, address %s\n",
sc->sc_hw_rev & IWM_CSR_HW_REV_TYPE_MSK,
- IWM_UCODE_MAJOR(sc->sc_fwver),
- IWM_UCODE_MINOR(sc->sc_fwver),
- IWM_UCODE_API(sc->sc_fwver));
+ sc->sc_fwver, ether_sprintf(sc->sc_nvm.hw_addr));
/* not all hardware can do 5GHz band */
if (!sc->sc_nvm.sku_cap_band_52GHz_enable)
@@ -4894,18 +5997,18 @@ static void
iwm_scan_start(struct ieee80211com *ic)
{
struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- struct iwm_softc *sc = ic->ic_softc;
+ struct iwm_softc *sc = ic->ic_softc;
int error;
- if (sc->sc_scanband)
- return;
IWM_LOCK(sc);
- error = iwm_mvm_scan_request(sc, IEEE80211_CHAN_2GHZ, 0, NULL, 0);
- if (error) {
+ if (isset(sc->sc_enabled_capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN))
+ error = iwm_mvm_umac_scan(sc);
+ else
+ error = iwm_mvm_lmac_scan(sc);
+ if (error != 0) {
device_printf(sc->sc_dev, "could not initiate 2 GHz scan\n");
IWM_UNLOCK(sc);
ieee80211_cancel_scan(vap);
- sc->sc_scanband = 0;
} else {
iwm_led_blink_start(sc);
IWM_UNLOCK(sc);
@@ -4976,8 +6079,8 @@ iwm_resume(device_t dev)
iwm_init_task(device_get_softc(dev));
IWM_LOCK(sc);
- if (sc->sc_flags & IWM_FLAG_DORESUME) {
- sc->sc_flags &= ~IWM_FLAG_DORESUME;
+ if (sc->sc_flags & IWM_FLAG_SCANNING) {
+ sc->sc_flags &= ~IWM_FLAG_SCANNING;
do_reinit = 1;
}
IWM_UNLOCK(sc);
@@ -5001,7 +6104,7 @@ iwm_suspend(device_t dev)
if (do_stop) {
IWM_LOCK(sc);
iwm_stop(sc);
- sc->sc_flags |= IWM_FLAG_DORESUME;
+ sc->sc_flags |= IWM_FLAG_SCANNING;
IWM_UNLOCK(sc);
}
@@ -5015,15 +6118,14 @@ iwm_detach_local(struct iwm_softc *sc, int do_net80211)
device_t dev = sc->sc_dev;
int i;
- if (sc->sc_tq) {
- taskqueue_drain_all(sc->sc_tq);
- taskqueue_free(sc->sc_tq);
- }
+ ieee80211_draintask(&sc->sc_ic, &sc->sc_es_task);
+
callout_drain(&sc->sc_led_blink_to);
callout_drain(&sc->sc_watchdog_to);
iwm_stop_device(sc);
- if (do_net80211)
+ if (do_net80211) {
ieee80211_ifdetach(&sc->sc_ic);
+ }
iwm_phy_db_free(sc);
diff --git a/sys/dev/iwm/if_iwm_led.c b/sys/dev/iwm/if_iwm_led.c
index 53a385720ddf..dce4e8d1aa49 100644
--- a/sys/dev/iwm/if_iwm_led.c
+++ b/sys/dev/iwm/if_iwm_led.c
@@ -148,13 +148,13 @@ iwm_mvm_led_disable(struct iwm_softc *sc)
IWM_WRITE(sc, IWM_CSR_LED_REG, IWM_CSR_LED_REG_TURN_OFF);
}
-int
+static int
iwm_mvm_led_is_enabled(struct iwm_softc *sc)
{
return (IWM_READ(sc, IWM_CSR_LED_REG) == IWM_CSR_LED_REG_TURN_ON);
}
-void
+static void
iwm_led_blink_timeout(void *arg)
{
struct iwm_softc *sc = arg;
@@ -177,6 +177,6 @@ iwm_led_blink_start(struct iwm_softc *sc)
void
iwm_led_blink_stop(struct iwm_softc *sc)
{
- callout_drain(&sc->sc_led_blink_to);
+ callout_stop(&sc->sc_led_blink_to);
iwm_mvm_led_disable(sc);
}
diff --git a/sys/dev/iwm/if_iwm_led.h b/sys/dev/iwm/if_iwm_led.h
index b5c7d1ce17b0..4fa6bbaff6d6 100644
--- a/sys/dev/iwm/if_iwm_led.h
+++ b/sys/dev/iwm/if_iwm_led.h
@@ -91,11 +91,9 @@
#ifndef __IF_IWM_LED_H__
#define __IF_IWM_LED_H__
-void iwm_mvm_led_enable(struct iwm_softc *);
-void iwm_mvm_led_disable(struct iwm_softc *);
-int iwm_mvm_led_is_enabled(struct iwm_softc *);
-void iwm_led_blink_timeout(void *);
-void iwm_led_blink_start(struct iwm_softc *);
-void iwm_led_blink_stop(struct iwm_softc *);
+extern void iwm_mvm_led_enable(struct iwm_softc *);
+extern void iwm_mvm_led_disable(struct iwm_softc *);
+extern void iwm_led_blink_start(struct iwm_softc *);
+extern void iwm_led_blink_stop(struct iwm_softc *);
#endif /* __IF_IWM_LED_H__ */
diff --git a/sys/dev/iwm/if_iwm_mac_ctxt.c b/sys/dev/iwm/if_iwm_mac_ctxt.c
index 139326056c82..0d2da5f6c799 100644
--- a/sys/dev/iwm/if_iwm_mac_ctxt.c
+++ b/sys/dev/iwm/if_iwm_mac_ctxt.c
@@ -280,12 +280,24 @@ iwm_mvm_mac_ctxt_cmd_common(struct iwm_softc *sc, struct iwm_node *in,
/*
* XXX should we error out if in_assoc is 1 and ni == NULL?
*/
+#if 0
if (in->in_assoc) {
IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
} else {
/* eth broadcast address */
- memset(cmd->bssid_addr, 0xff, sizeof(cmd->bssid_addr));
+ IEEE80211_ADDR_COPY(cmd->bssid_addr, ieee80211broadcastaddr);
}
+#else
+ /*
+ * XXX This workaround makes the firmware behave more correctly once
+ * we are associated, regularly giving us statistics notifications,
+ * as well as signaling missed beacons to us.
+ * Since we only call iwm_mvm_mac_ctxt_add() and
+ * iwm_mvm_mac_ctxt_changed() when already authenticating or
+ * associating, ni->ni_bssid should always make sense here.
+ */
+ IEEE80211_ADDR_COPY(cmd->bssid_addr, ni->ni_bssid);
+#endif
/*
* Default to 2ghz if no node information is given.
@@ -457,13 +469,7 @@ static int
iwm_mvm_mac_ctx_send(struct iwm_softc *sc, struct ieee80211vap *vap,
uint32_t action)
{
- int ret;
-
- ret = iwm_mvm_mac_ctxt_cmd_station(sc, vap, action);
- if (ret)
- return (ret);
-
- return (0);
+ return iwm_mvm_mac_ctxt_cmd_station(sc, vap, action);
}
int
@@ -489,17 +495,13 @@ int
iwm_mvm_mac_ctxt_changed(struct iwm_softc *sc, struct ieee80211vap *vap)
{
struct iwm_vap *iv = IWM_VAP(vap);
- int ret;
if (iv->is_uploaded == 0) {
device_printf(sc->sc_dev, "%s: called; uploaded = 0\n",
__func__);
return (EIO);
}
- ret = iwm_mvm_mac_ctx_send(sc, vap, IWM_FW_CTXT_ACTION_MODIFY);
- if (ret)
- return (ret);
- return (0);
+ return iwm_mvm_mac_ctx_send(sc, vap, IWM_FW_CTXT_ACTION_MODIFY);
}
#if 0
diff --git a/sys/dev/iwm/if_iwm_pcie_trans.c b/sys/dev/iwm/if_iwm_pcie_trans.c
index 8f2f72827604..0ef3503faee1 100644
--- a/sys/dev/iwm/if_iwm_pcie_trans.c
+++ b/sys/dev/iwm/if_iwm_pcie_trans.c
@@ -256,13 +256,18 @@ iwm_nic_lock(struct iwm_softc *sc)
IWM_SETBITS(sc, IWM_CSR_GP_CNTRL,
IWM_CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_8000)
+ DELAY(2);
+
if (iwm_poll_bit(sc, IWM_CSR_GP_CNTRL,
IWM_CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
IWM_CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY
| IWM_CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP, 15000)) {
- rv = 1;
+ rv = 1;
} else {
/* jolt */
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET,
+ "%s: resetting device via NMI\n", __func__);
IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_FORCE_NMI);
}
@@ -343,13 +348,20 @@ iwm_check_rfkill(struct iwm_softc *sc)
int
iwm_set_hw_ready(struct iwm_softc *sc)
{
+ int ready;
+
IWM_SETBITS(sc, IWM_CSR_HW_IF_CONFIG_REG,
IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);
- return iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
+ ready = iwm_poll_bit(sc, IWM_CSR_HW_IF_CONFIG_REG,
IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
IWM_HW_READY_TIMEOUT);
+ if (ready) {
+ IWM_SETBITS(sc, IWM_CSR_MBOX_SET_REG,
+ IWM_CSR_MBOX_SET_REG_OS_ALIVE);
+ }
+ return ready;
}
#undef IWM_HW_READY_TIMEOUT
@@ -413,8 +425,10 @@ iwm_apm_init(struct iwm_softc *sc)
IWM_DPRINTF(sc, IWM_DEBUG_RESET, "iwm apm start\n");
/* Disable L0S exit timer (platform NMI Work/Around) */
- IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
- IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ if (sc->sc_device_family != IWM_DEVICE_FAMILY_8000) {
+ IWM_SETBITS(sc, IWM_CSR_GIO_CHICKEN_BITS,
+ IWM_CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
+ }
/*
* Disable L0s without affecting L1;
@@ -435,7 +449,7 @@ iwm_apm_init(struct iwm_softc *sc)
iwm_apm_config(sc);
-#if 0 /* not for 7k */
+#if 0 /* not for 7k/8k */
/* Configure analog phase-lock-loop before activating to D0A */
if (trans->cfg->base_params->pll_cfg_val)
IWM_SETBITS(trans, IWM_CSR_ANA_PLL_CFG,
@@ -491,18 +505,19 @@ iwm_apm_init(struct iwm_softc *sc)
* do not disable clocks. This preserves any hardware bits already
* set by default in "CLK_CTRL_REG" after reset.
*/
- iwm_write_prph(sc, IWM_APMG_CLK_EN_REG, IWM_APMG_CLK_VAL_DMA_CLK_RQT);
- //kpause("iwmapm", 0, mstohz(20), NULL);
- DELAY(20);
-
- /* Disable L1-Active */
- iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
- IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
-
- /* Clear the interrupt in APMG if the NIC is in RFKILL */
- iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
- IWM_APMG_RTC_INT_STT_RFKILL);
-
+ if (sc->sc_device_family == IWM_DEVICE_FAMILY_7000) {
+ iwm_write_prph(sc, IWM_APMG_CLK_EN_REG,
+ IWM_APMG_CLK_VAL_DMA_CLK_RQT);
+ DELAY(20);
+
+ /* Disable L1-Active */
+ iwm_set_bits_prph(sc, IWM_APMG_PCIDEV_STT_REG,
+ IWM_APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
+
+ /* Clear the interrupt in APMG if the NIC is in RFKILL */
+ iwm_write_prph(sc, IWM_APMG_RTC_INT_STT_REG,
+ IWM_APMG_RTC_INT_STT_RFKILL);
+ }
out:
if (error)
device_printf(sc->sc_dev, "apm init error %d\n", error);
@@ -533,9 +548,7 @@ iwm_start_hw(struct iwm_softc *sc)
return error;
/* Reset the entire device */
- IWM_WRITE(sc, IWM_CSR_RESET,
- IWM_CSR_RESET_REG_FLAG_SW_RESET |
- IWM_CSR_RESET_REG_FLAG_NEVO_RESET);
+ IWM_WRITE(sc, IWM_CSR_RESET, IWM_CSR_RESET_REG_FLAG_SW_RESET);
DELAY(10);
if ((error = iwm_apm_init(sc)) != 0)
diff --git a/sys/dev/iwm/if_iwm_phy_ctxt.c b/sys/dev/iwm/if_iwm_phy_ctxt.c
index fc8349d028a7..da6bb1a0b4bd 100644
--- a/sys/dev/iwm/if_iwm_phy_ctxt.c
+++ b/sys/dev/iwm/if_iwm_phy_ctxt.c
@@ -202,8 +202,8 @@ iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
ieee80211_chan2ieee(ic, chan),
chains_static,
chains_dynamic,
- IWM_FW_VALID_RX_ANT(sc),
- IWM_FW_VALID_TX_ANT(sc));
+ iwm_fw_valid_rx_ant(sc),
+ iwm_fw_valid_tx_ant(sc));
cmd->ci.band = IEEE80211_IS_CHAN_2GHZ(chan) ?
@@ -217,13 +217,13 @@ iwm_mvm_phy_ctxt_cmd_data(struct iwm_softc *sc,
idle_cnt = chains_static;
active_cnt = chains_dynamic;
- cmd->rxchain_info = htole32(IWM_FW_VALID_RX_ANT(sc) <<
+ cmd->rxchain_info = htole32(iwm_fw_valid_rx_ant(sc) <<
IWM_PHY_RX_CHAIN_VALID_POS);
cmd->rxchain_info |= htole32(idle_cnt << IWM_PHY_RX_CHAIN_CNT_POS);
cmd->rxchain_info |= htole32(active_cnt <<
IWM_PHY_RX_CHAIN_MIMO_CNT_POS);
- cmd->txchain_info = htole32(IWM_FW_VALID_TX_ANT(sc));
+ cmd->txchain_info = htole32(iwm_fw_valid_tx_ant(sc));
}
/*
diff --git a/sys/dev/iwm/if_iwm_phy_db.c b/sys/dev/iwm/if_iwm_phy_db.c
index 42d891466cdf..b96f87490d74 100644
--- a/sys/dev/iwm/if_iwm_phy_db.c
+++ b/sys/dev/iwm/if_iwm_phy_db.c
@@ -343,7 +343,6 @@ iwm_send_phy_db_cmd(struct iwm_softc *sc, uint16_t type,
cmd.len[0] = sizeof(struct iwm_phy_db_cmd);
cmd.data[1] = data;
cmd.len[1] = length;
- cmd.dataflags[1] = IWM_HCMD_DFL_NOCOPY;
return iwm_send_cmd(sc, &cmd);
}
@@ -374,6 +373,7 @@ iwm_phy_db_send_all_channel_groups(struct iwm_softc *sc,
return err;
}
+ DELAY(1000);
IWM_DPRINTF(sc, IWM_DEBUG_CMD,
"Sent PHY_DB HCMD, type = %d num = %d\n", type, i);
}
diff --git a/sys/dev/iwm/if_iwm_power.c b/sys/dev/iwm/if_iwm_power.c
index 6503756ac232..aca51f89471b 100644
--- a/sys/dev/iwm/if_iwm_power.c
+++ b/sys/dev/iwm/if_iwm_power.c
@@ -335,14 +335,3 @@ iwm_mvm_disable_beacon_filter(struct iwm_softc *sc)
return ret;
}
-
-#if 0
-static int
-iwm_mvm_update_beacon_filter(struct iwm_softc *sc, struct iwm_node *in)
-{
- if (!sc->sc_bf.bf_enabled)
- return 0;
-
- return iwm_mvm_enable_beacon_filter(sc, in);
-}
-#endif
diff --git a/sys/dev/iwm/if_iwm_scan.c b/sys/dev/iwm/if_iwm_scan.c
index 01b0872c105c..94f7125da424 100644
--- a/sys/dev/iwm/if_iwm_scan.c
+++ b/sys/dev/iwm/if_iwm_scan.c
@@ -172,7 +172,7 @@ iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
uint16_t rx_chain;
uint8_t rx_ant;
- rx_ant = IWM_FW_VALID_RX_ANT(sc);
+ rx_ant = iwm_fw_valid_rx_ant(sc);
rx_chain = rx_ant << IWM_PHY_RX_CHAIN_VALID_POS;
rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_MIMO_SEL_POS;
rx_chain |= rx_ant << IWM_PHY_RX_CHAIN_FORCE_SEL_POS;
@@ -180,6 +180,7 @@ iwm_mvm_scan_rx_chain(struct iwm_softc *sc)
return htole16(rx_chain);
}
+#if 0
static uint32_t
iwm_mvm_scan_max_out_time(struct iwm_softc *sc, uint32_t flags, int is_assoc)
{
@@ -197,15 +198,7 @@ iwm_mvm_scan_suspend_time(struct iwm_softc *sc, int is_assoc)
return 0;
return htole32(SUSPEND_TIME_PERIOD);
}
-
-static uint32_t
-iwm_mvm_scan_rxon_flags(struct iwm_softc *sc, int flags)
-{
- if (flags & IEEE80211_CHAN_2GHZ)
- return htole32(IWM_PHY_BAND_24);
- else
- return htole32(IWM_PHY_BAND_5);
-}
+#endif
static uint32_t
iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
@@ -216,7 +209,7 @@ iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
for (i = 0, ind = sc->sc_scan_last_antenna;
i < IWM_RATE_MCS_ANT_NUM; i++) {
ind = (ind + 1) % IWM_RATE_MCS_ANT_NUM;
- if (IWM_FW_VALID_TX_ANT(sc) & (1 << ind)) {
+ if (iwm_fw_valid_tx_ant(sc) & (1 << ind)) {
sc->sc_scan_last_antenna = ind;
break;
}
@@ -230,6 +223,7 @@ iwm_mvm_scan_rate_n_flags(struct iwm_softc *sc, int flags, int no_cck)
return htole32(IWM_RATE_6M_PLCP | tx_ant);
}
+#if 0
/*
* If req->n_ssids > 0, it means we should do an active scan.
* In case of active scan w/o directed scan, we receive a zero-length SSID
@@ -253,24 +247,30 @@ iwm_mvm_get_passive_dwell(struct iwm_softc *sc, int flags)
{
return (flags & IEEE80211_CHAN_2GHZ) ? 100 + 20 : 100 + 10;
}
+#endif
static int
-iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
- int flags, int n_ssids, int basic_ssid)
+iwm_mvm_scan_skip_channel(struct ieee80211_channel *c)
+{
+ if (IEEE80211_IS_CHAN_2GHZ(c) && IEEE80211_IS_CHAN_B(c))
+ return 0;
+ else if (IEEE80211_IS_CHAN_5GHZ(c) && IEEE80211_IS_CHAN_A(c))
+ return 0;
+ else
+ return 1;
+}
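/*
 * Editor's note: the return value is 1 = skip, 0 = keep, so each
 * frequency contributes a single entry: for channel 6 only the 11b
 * flavour survives, for channel 36 only the 11a flavour.  An
 * equivalent positive formulation (illustrative only):
 */
static int
iwm_mvm_scan_keep_channel(struct ieee80211_channel *c)
{
	return ((IEEE80211_IS_CHAN_2GHZ(c) && IEEE80211_IS_CHAN_B(c)) ||
	    (IEEE80211_IS_CHAN_5GHZ(c) && IEEE80211_IS_CHAN_A(c)));
}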
+
+static uint8_t
+iwm_mvm_lmac_scan_fill_channels(struct iwm_softc *sc,
+ struct iwm_scan_channel_cfg_lmac *chan, int n_ssids)
{
struct ieee80211com *ic = &sc->sc_ic;
- uint16_t passive_dwell = iwm_mvm_get_passive_dwell(sc, flags);
- uint16_t active_dwell = iwm_mvm_get_active_dwell(sc, flags, n_ssids);
- struct iwm_scan_channel *chan = (struct iwm_scan_channel *)
- (cmd->data + le16toh(cmd->tx_cmd.len));
- int type = (1 << n_ssids) - 1;
struct ieee80211_channel *c;
- int nchan, j;
+ uint8_t nchan;
+ int j;
- if (!basic_ssid)
- type |= (1 << n_ssids);
-
- for (nchan = j = 0; j < ic->ic_nchans; j++) {
+ for (nchan = j = 0;
+ j < ic->ic_nchans && nchan < sc->sc_capa_n_scan_channels; j++) {
c = &ic->ic_channels[j];
/* For 2GHz, only populate 11b channels */
/* For 5GHz, only populate 11a channels */
@@ -278,175 +278,458 @@ iwm_mvm_scan_fill_channels(struct iwm_softc *sc, struct iwm_scan_cmd *cmd,
* Catch other channels, in case we have 900MHz channels or
* something in the chanlist.
*/
- if ((flags & IEEE80211_CHAN_2GHZ) && (! IEEE80211_IS_CHAN_B(c))) {
- continue;
- } else if ((flags & IEEE80211_CHAN_5GHZ) && (! IEEE80211_IS_CHAN_A(c))) {
+ if (iwm_mvm_scan_skip_channel(c)) {
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
+ "%s: skipping channel (freq=%d, ieee=%d, flags=0x%08x)\n",
+ __func__, c->ic_freq, c->ic_ieee, c->ic_flags);
continue;
- } else {
+ }
+
+ IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
+ "Adding channel %d (%d Mhz) to the list\n",
+ nchan, c->ic_freq);
+ chan->channel_num = htole16(ieee80211_mhz2ieee(c->ic_freq, 0));
+ chan->iter_count = htole16(1);
+ chan->iter_interval = htole32(0);
+ chan->flags = htole32(IWM_UNIFIED_SCAN_CHANNEL_PARTIAL);
+#if 0 /* makes scanning while associated less useful */
+ if (n_ssids != 0)
+ chan->flags |= htole32(1 << 1); /* select SSID 0 */
+#endif
+ chan++;
+ nchan++;
+ }
+
+ return nchan;
+}
+
+static uint8_t
+iwm_mvm_umac_scan_fill_channels(struct iwm_softc *sc,
+ struct iwm_scan_channel_cfg_umac *chan, int n_ssids)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211_channel *c;
+ uint8_t nchan;
+ int j;
+
+ for (nchan = j = 0;
+ j < ic->ic_nchans && nchan < sc->sc_capa_n_scan_channels; j++) {
+ c = &ic->ic_channels[j];
+ /* For 2GHz, only populate 11b channels */
+ /* For 5GHz, only populate 11a channels */
+ /*
+ * Catch other channels, in case we have 900MHz channels or
+ * something in the chanlist.
+ */
+ if (iwm_mvm_scan_skip_channel(c)) {
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
"%s: skipping channel (freq=%d, ieee=%d, flags=0x%08x)\n",
- __func__,
- c->ic_freq,
- c->ic_ieee,
- c->ic_flags);
+ __func__, c->ic_freq, c->ic_ieee, c->ic_flags);
+ continue;
}
+
IWM_DPRINTF(sc, IWM_DEBUG_RESET | IWM_DEBUG_EEPROM,
"Adding channel %d (%d Mhz) to the list\n",
- nchan, c->ic_freq);
- chan->channel = htole16(ieee80211_mhz2ieee(c->ic_freq, flags));
- chan->type = htole32(type);
- if (c->ic_flags & IEEE80211_CHAN_PASSIVE)
- chan->type &= htole32(~IWM_SCAN_CHANNEL_TYPE_ACTIVE);
- chan->active_dwell = htole16(active_dwell);
- chan->passive_dwell = htole16(passive_dwell);
- chan->iteration_count = htole16(1);
+ nchan, c->ic_freq);
+ chan->channel_num = ieee80211_mhz2ieee(c->ic_freq, 0);
+ chan->iter_count = 1;
+ chan->iter_interval = htole16(0);
+ chan->flags = htole32(0);
+#if 0 /* makes scanning while associated less useful */
+ if (n_ssids != 0)
+ chan->flags = htole32(1 << 0); /* select SSID 0 */
+#endif
chan++;
nchan++;
}
- if (nchan == 0)
- device_printf(sc->sc_dev,
- "%s: NO CHANNEL!\n", __func__);
+
return nchan;
}
-/*
- * Fill in probe request with the following parameters:
- * TA is our vif HW address, which mac80211 ensures we have.
- * Packet is broadcasted, so this is both SA and DA.
- * The probe request IE is made out of two: first comes the most prioritized
- * SSID if a directed scan is requested. Second comes whatever extra
- * information was given to us as the scan request IE.
- */
-static uint16_t
-iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct ieee80211_frame *frame,
- const uint8_t *ta, int n_ssids, const uint8_t *ssid, int ssid_len,
- const uint8_t *ie, int ie_len, int left)
+static int
+iwm_mvm_fill_probe_req(struct iwm_softc *sc, struct iwm_scan_probe_req *preq)
{
- uint8_t *pos = NULL;
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
+ struct ieee80211_frame *wh = (struct ieee80211_frame *)preq->buf;
+ struct ieee80211_rateset *rs;
+ size_t remain = sizeof(preq->buf);
+ uint8_t *frm, *pos;
+ int ssid_len = 0;
+ const uint8_t *ssid = NULL;
- /* Make sure there is enough space for the probe request,
- * two mandatory IEs and the data */
- left -= sizeof(*frame);
- if (left < 0)
- return 0;
+ memset(preq, 0, sizeof(*preq));
- frame->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
+ /* Ensure enough space for header and SSID IE. */
+ if (remain < sizeof(*wh) + 2 + ssid_len)
+ return ENOBUFS;
+
+ /*
+ * Build a probe request frame. Most of the following code is a
+ * copy & paste of what is done in net80211.
+ */
+ wh->i_fc[0] = IEEE80211_FC0_VERSION_0 | IEEE80211_FC0_TYPE_MGT |
IEEE80211_FC0_SUBTYPE_PROBE_REQ;
- frame->i_fc[1] = IEEE80211_FC1_DIR_NODS;
- IEEE80211_ADDR_COPY(frame->i_addr1, ieee80211broadcastaddr);
- IEEE80211_ADDR_COPY(frame->i_addr2, ta);
- IEEE80211_ADDR_COPY(frame->i_addr3, ieee80211broadcastaddr);
+ wh->i_fc[1] = IEEE80211_FC1_DIR_NODS;
+ IEEE80211_ADDR_COPY(wh->i_addr1, ieee80211broadcastaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr2, vap ? vap->iv_myaddr : ic->ic_macaddr);
+ IEEE80211_ADDR_COPY(wh->i_addr3, ieee80211broadcastaddr);
+ *(uint16_t *)&wh->i_dur[0] = 0; /* filled by HW */
+ *(uint16_t *)&wh->i_seq[0] = 0; /* filled by HW */
+
+ frm = (uint8_t *)(wh + 1);
+ frm = ieee80211_add_ssid(frm, ssid, ssid_len);
+
+ /* Tell the firmware where the MAC header is. */
+ preq->mac_header.offset = 0;
+ preq->mac_header.len = htole16(frm - (uint8_t *)wh);
+ remain -= frm - (uint8_t *)wh;
+
+ /* Fill in 2GHz IEs and tell firmware where they are. */
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11G];
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+ if (remain < 4 + rs->rs_nrates)
+ return ENOBUFS;
+ } else if (remain < 2 + rs->rs_nrates) {
+ return ENOBUFS;
+ }
+ preq->band_data[0].offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+ frm = ieee80211_add_rates(frm, rs);
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+ frm = ieee80211_add_xrates(frm, rs);
+ preq->band_data[0].len = htole16(frm - pos);
+ remain -= frm - pos;
+
+ if (isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) {
+ if (remain < 3)
+ return ENOBUFS;
+ *frm++ = IEEE80211_ELEMID_DSPARMS;
+ *frm++ = 1;
+ *frm++ = 0;
+ remain -= 3;
+ }
- /* for passive scans, no need to fill anything */
- if (n_ssids == 0)
- return sizeof(*frame);
+ if (sc->sc_nvm.sku_cap_band_52GHz_enable) {
+ /* Fill in 5GHz IEs. */
+ rs = &ic->ic_sup_rates[IEEE80211_MODE_11A];
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE) {
+ if (remain < 4 + rs->rs_nrates)
+ return ENOBUFS;
+ } else if (remain < 2 + rs->rs_nrates) {
+ return ENOBUFS;
+ }
+ preq->band_data[1].offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+ frm = ieee80211_add_rates(frm, rs);
+ if (rs->rs_nrates > IEEE80211_RATE_SIZE)
+ frm = ieee80211_add_xrates(frm, rs);
+ preq->band_data[1].len = htole16(frm - pos);
+ remain -= frm - pos;
+ }
- /* points to the payload of the request */
- pos = (uint8_t *)frame + sizeof(*frame);
+ /* Send 11n IEs on both 2GHz and 5GHz bands. */
+ preq->common_data.offset = htole16(frm - (uint8_t *)wh);
+ pos = frm;
+#if 0
+ if (ic->ic_flags & IEEE80211_F_HTON) {
+ if (remain < 28)
+ return ENOBUFS;
+ frm = ieee80211_add_htcaps(frm, ic);
+ /* XXX add WME info? */
+ }
+#endif
+ preq->common_data.len = htole16(frm - pos);
- /* fill in our SSID IE */
- left -= ssid_len + 2;
- if (left < 0)
- return 0;
+ return 0;
+}
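/*
 * Editor's note: a minimal sketch of the "remain" accounting used
 * throughout the function above, factored into a bounds-checked
 * append helper (hypothetical name, not part of this commit):
 */
static int
iwm_preq_append(uint8_t **frm, size_t *remain, const void *ie, size_t len)
{
	if (*remain < len)
		return (ENOBUFS);
	memcpy(*frm, ie, len);
	*frm += len;
	*remain -= len;
	return (0);
}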
- pos = ieee80211_add_ssid(pos, ssid, ssid_len);
+int
+iwm_mvm_config_umac_scan(struct iwm_softc *sc)
+{
+ struct ieee80211com *ic = &sc->sc_ic;
+ struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- if (ie && ie_len && left >= ie_len) {
- memcpy(pos, ie, ie_len);
- pos += ie_len;
+ struct iwm_scan_config *scan_config;
+ int ret, j, nchan;
+ size_t cmd_size;
+ struct ieee80211_channel *c;
+ struct iwm_host_cmd hcmd = {
+ .id = iwm_cmd_id(IWM_SCAN_CFG_CMD, IWM_ALWAYS_LONG_GROUP, 0),
+ .flags = IWM_CMD_SYNC,
+ };
+ static const uint32_t rates = (IWM_SCAN_CONFIG_RATE_1M |
+ IWM_SCAN_CONFIG_RATE_2M | IWM_SCAN_CONFIG_RATE_5M |
+ IWM_SCAN_CONFIG_RATE_11M | IWM_SCAN_CONFIG_RATE_6M |
+ IWM_SCAN_CONFIG_RATE_9M | IWM_SCAN_CONFIG_RATE_12M |
+ IWM_SCAN_CONFIG_RATE_18M | IWM_SCAN_CONFIG_RATE_24M |
+ IWM_SCAN_CONFIG_RATE_36M | IWM_SCAN_CONFIG_RATE_48M |
+ IWM_SCAN_CONFIG_RATE_54M);
+
+ cmd_size = sizeof(*scan_config) + sc->sc_capa_n_scan_channels;
+
+ scan_config = malloc(cmd_size, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (scan_config == NULL)
+ return ENOMEM;
+
+ scan_config->tx_chains = htole32(iwm_fw_valid_tx_ant(sc));
+ scan_config->rx_chains = htole32(iwm_fw_valid_rx_ant(sc));
+ scan_config->legacy_rates = htole32(rates |
+ IWM_SCAN_CONFIG_SUPPORTED_RATE(rates));
+
+ /* These timings correspond to iwlwifi's UNASSOC scan. */
+ scan_config->dwell_active = 10;
+ scan_config->dwell_passive = 110;
+ scan_config->dwell_fragmented = 44;
+ scan_config->dwell_extended = 90;
+ scan_config->out_of_channel_time = htole32(0);
+ scan_config->suspend_time = htole32(0);
+
+ IEEE80211_ADDR_COPY(scan_config->mac_addr,
+ vap ? vap->iv_myaddr : ic->ic_macaddr);
+
+ scan_config->bcast_sta_id = sc->sc_aux_sta.sta_id;
+ scan_config->channel_flags = IWM_CHANNEL_FLAG_EBS |
+ IWM_CHANNEL_FLAG_ACCURATE_EBS | IWM_CHANNEL_FLAG_EBS_ADD |
+ IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE;
+
+ for (nchan = j = 0;
+ j < ic->ic_nchans && nchan < sc->sc_capa_n_scan_channels; j++) {
+ c = &ic->ic_channels[j];
+ /* For 2GHz, only populate 11b channels */
+ /* For 5GHz, only populate 11a channels */
+ /*
+ * Catch other channels, in case we have 900MHz channels or
+ * something in the chanlist.
+ */
+ if (iwm_mvm_scan_skip_channel(c))
+ continue;
+ scan_config->channel_array[nchan++] =
+ ieee80211_mhz2ieee(c->ic_freq, 0);
}
- return pos - (uint8_t *)frame;
+ scan_config->flags = htole32(IWM_SCAN_CONFIG_FLAG_ACTIVATE |
+ IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS |
+ IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS |
+ IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS |
+ IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID |
+ IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES |
+ IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES |
+ IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR |
+ IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS|
+ IWM_SCAN_CONFIG_N_CHANNELS(nchan) |
+ IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED);
+
+ hcmd.data[0] = scan_config;
+ hcmd.len[0] = cmd_size;
+
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Sending UMAC scan config\n");
+
+ ret = iwm_send_cmd(sc, &hcmd);
+ if (!ret)
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
+ "UMAC scan config was sent successfully\n");
+
+ free(scan_config, M_DEVBUF);
+ return ret;
}
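/*
 * Editor's note: the cmd_size arithmetic above assumes that
 * channel_array[] is a trailing array in struct iwm_scan_config with
 * one byte (an IEEE channel number) per scan-channel slot, i.e.
 *
 *	cmd_size = sizeof(struct iwm_scan_config)
 *		 + sc->sc_capa_n_scan_channels * sizeof(uint8_t);
 */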
int
-iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
- int n_ssids, uint8_t *ssid, int ssid_len)
+iwm_mvm_umac_scan(struct iwm_softc *sc)
{
struct iwm_host_cmd hcmd = {
- .id = IWM_SCAN_REQUEST_CMD,
+ .id = iwm_cmd_id(IWM_SCAN_REQ_UMAC, IWM_ALWAYS_LONG_GROUP, 0),
.len = { 0, },
- .data = { sc->sc_scan_cmd, },
+ .data = { NULL, },
.flags = IWM_CMD_SYNC,
- .dataflags = { IWM_HCMD_DFL_NOCOPY, },
};
- struct iwm_scan_cmd *cmd = sc->sc_scan_cmd;
- struct ieee80211com *ic = &sc->sc_ic;
- struct ieee80211vap *vap = TAILQ_FIRST(&ic->ic_vaps);
- int is_assoc = 0;
+ struct iwm_scan_req_umac *req;
+ struct iwm_scan_req_umac_tail *tail;
+ size_t req_len;
+ int ssid_len = 0;
+ const uint8_t *ssid = NULL;
int ret;
- uint32_t status;
- int basic_ssid =
- !(sc->sc_capaflags & IWM_UCODE_TLV_FLAGS_NO_BASIC_SSID);
- sc->sc_scanband = flags & (IEEE80211_CHAN_2GHZ | IEEE80211_CHAN_5GHZ);
+ req_len = sizeof(struct iwm_scan_req_umac) +
+ (sizeof(struct iwm_scan_channel_cfg_umac) *
+ sc->sc_capa_n_scan_channels) +
+ sizeof(struct iwm_scan_req_umac_tail);
+ if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
+ return ENOMEM;
+ req = malloc(req_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (req == NULL)
+ return ENOMEM;
+
+ hcmd.len[0] = (uint16_t)req_len;
+ hcmd.data[0] = (void *)req;
+
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN, "Handling ieee80211 scan request\n");
+
+ /* These timings correspond to iwlwifi's UNASSOC scan. */
+ req->active_dwell = 10;
+ req->passive_dwell = 110;
+ req->fragmented_dwell = 44;
+ req->extended_dwell = 90;
+ req->max_out_time = 0;
+ req->suspend_time = 0;
+
+ req->scan_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
+ req->ooc_priority = htole32(IWM_SCAN_PRIORITY_HIGH);
+
+ req->n_channels = iwm_mvm_umac_scan_fill_channels(sc,
+ (struct iwm_scan_channel_cfg_umac *)req->data, ssid_len != 0);
+
+ req->general_flags = htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL |
+ IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE |
+ IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL);
+
+ tail = (void *)((char *)&req->data +
+ sizeof(struct iwm_scan_channel_cfg_umac) *
+ sc->sc_capa_n_scan_channels);
+
+ /* Check if we're doing an active directed scan. */
+ if (ssid_len != 0) {
+ tail->direct_scan[0].id = IEEE80211_ELEMID_SSID;
+ tail->direct_scan[0].len = ssid_len;
+ memcpy(tail->direct_scan[0].ssid, ssid, ssid_len);
+ req->general_flags |=
+ htole32(IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT);
+ } else {
+ req->general_flags |= htole32(IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE);
+ }
+
+ if (isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ req->general_flags |=
+ htole32(IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED);
+
+ ret = iwm_mvm_fill_probe_req(sc, &tail->preq);
+ if (ret) {
+ free(req, M_DEVBUF);
+ return ret;
+ }
+
+ /* Specify the scan plan: We'll do one iteration. */
+ tail->schedule[0].interval = 0;
+ tail->schedule[0].iter_count = 1;
+
+ ret = iwm_send_cmd(sc, &hcmd);
+ if (!ret)
+ IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
+ "Scan request was sent successfully\n");
+ free(req, M_DEVBUF);
+ return ret;
+}
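/*
 * Editor's note: a sketch of the tail pointer arithmetic used above
 * (hypothetical helper; assumes the struct layouts named in this
 * patch).  The tail always follows the maximum number of channel
 * slots, not just the slots actually filled in:
 */
static size_t
iwm_umac_scan_tail_offset(const struct iwm_softc *sc)
{
	return (offsetof(struct iwm_scan_req_umac, data) +
	    sc->sc_capa_n_scan_channels *
	    sizeof(struct iwm_scan_channel_cfg_umac));
}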
+
+int
+iwm_mvm_lmac_scan(struct iwm_softc *sc)
+{
+ struct iwm_host_cmd hcmd = {
+ .id = IWM_SCAN_OFFLOAD_REQUEST_CMD,
+ .len = { 0, },
+ .data = { NULL, },
+ .flags = IWM_CMD_SYNC,
+ };
+ struct iwm_scan_req_lmac *req;
+ size_t req_len;
+ int ret;
+ int ssid_len = 0;
+ const uint8_t *ssid = NULL;
IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
"Handling ieee80211 scan request\n");
- memset(cmd, 0, sc->sc_scan_cmd_len);
- cmd->quiet_time = htole16(IWM_ACTIVE_QUIET_TIME);
- cmd->quiet_plcp_th = htole16(IWM_PLCP_QUIET_THRESH);
- cmd->rxchain_sel_flags = iwm_mvm_scan_rx_chain(sc);
- cmd->max_out_time = iwm_mvm_scan_max_out_time(sc, 0, is_assoc);
- cmd->suspend_time = iwm_mvm_scan_suspend_time(sc, is_assoc);
- cmd->rxon_flags = iwm_mvm_scan_rxon_flags(sc, flags);
- cmd->filter_flags = htole32(IWM_MAC_FILTER_ACCEPT_GRP |
- IWM_MAC_FILTER_IN_BEACON);
+ req_len = sizeof(struct iwm_scan_req_lmac) +
+ (sizeof(struct iwm_scan_channel_cfg_lmac) *
+ sc->sc_capa_n_scan_channels) + sizeof(struct iwm_scan_probe_req);
+ if (req_len > IWM_MAX_CMD_PAYLOAD_SIZE)
+ return ENOMEM;
+ req = malloc(req_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (req == NULL)
+ return ENOMEM;
+
+ hcmd.len[0] = (uint16_t)req_len;
+ hcmd.data[0] = (void *)req;
+
+ /* These timings correspond to iwlwifi's UNASSOC scan. */
+ req->active_dwell = 10;
+ req->passive_dwell = 110;
+ req->fragmented_dwell = 44;
+ req->extended_dwell = 90;
+ req->max_out_time = 0;
+ req->suspend_time = 0;
+
+ req->scan_prio = htole32(IWM_SCAN_PRIORITY_HIGH);
+ req->rx_chain_select = iwm_mvm_scan_rx_chain(sc);
+ req->iter_num = htole32(1);
+ req->delay = 0;
+
+ req->scan_flags = htole32(IWM_MVM_LMAC_SCAN_FLAG_PASS_ALL |
+ IWM_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE |
+ IWM_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL);
+ if (ssid_len == 0)
+ req->scan_flags |= htole32(IWM_MVM_LMAC_SCAN_FLAG_PASSIVE);
+ else
+ req->scan_flags |=
+ htole32(IWM_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION);
+ if (isset(sc->sc_enabled_capa,
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
+ req->scan_flags |= htole32(IWM_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED);
+
+ req->flags = htole32(IWM_PHY_BAND_24);
+ if (sc->sc_nvm.sku_cap_band_52GHz_enable)
+ req->flags |= htole32(IWM_PHY_BAND_5);
+ req->filter_flags =
+ htole32(IWM_MAC_FILTER_ACCEPT_GRP | IWM_MAC_FILTER_IN_BEACON);
+
+ /* Tx flags 2 GHz. */
+ req->tx_cmd[0].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
+ IWM_TX_CMD_FLG_BT_DIS);
+ req->tx_cmd[0].rate_n_flags =
+ iwm_mvm_scan_rate_n_flags(sc, IEEE80211_CHAN_2GHZ, 1/*XXX*/);
+ req->tx_cmd[0].sta_id = sc->sc_aux_sta.sta_id;
- cmd->type = htole32(IWM_SCAN_TYPE_FORCED);
- cmd->repeats = htole32(1);
+ /* Tx flags 5 GHz. */
+ req->tx_cmd[1].tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
+ IWM_TX_CMD_FLG_BT_DIS);
+ req->tx_cmd[1].rate_n_flags =
+ iwm_mvm_scan_rate_n_flags(sc, IEEE80211_CHAN_5GHZ, 1/*XXX*/);
+ req->tx_cmd[1].sta_id = sc->sc_aux_sta.sta_id;
+
+ /* Check if we're doing an active directed scan. */
+ if (ssid_len != 0) {
+ req->direct_scan[0].id = IEEE80211_ELEMID_SSID;
+ req->direct_scan[0].len = ssid_len;
+ memcpy(req->direct_scan[0].ssid, ssid, ssid_len);
+ }
- /*
- * If the user asked for passive scan, don't change to active scan if
- * you see any activity on the channel - remain passive.
- */
- if (n_ssids > 0) {
- cmd->passive2active = htole16(1);
- cmd->scan_flags |= IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
-#if 0
- if (basic_ssid) {
- ssid = req->ssids[0].ssid;
- ssid_len = req->ssids[0].ssid_len;
- }
-#endif
- } else {
- cmd->passive2active = 0;
- cmd->scan_flags &= ~IWM_SCAN_FLAGS_PASSIVE2ACTIVE;
+ req->n_channels = iwm_mvm_lmac_scan_fill_channels(sc,
+ (struct iwm_scan_channel_cfg_lmac *)req->data,
+ ssid_len != 0);
+
+ ret = iwm_mvm_fill_probe_req(sc,
+ (struct iwm_scan_probe_req *)(req->data +
+ (sizeof(struct iwm_scan_channel_cfg_lmac) *
+ sc->sc_capa_n_scan_channels)));
+ if (ret) {
+ free(req, M_DEVBUF);
+ return ret;
}
- cmd->tx_cmd.tx_flags = htole32(IWM_TX_CMD_FLG_SEQ_CTL |
- IWM_TX_CMD_FLG_BT_DIS);
- cmd->tx_cmd.sta_id = sc->sc_aux_sta.sta_id;
- cmd->tx_cmd.life_time = htole32(IWM_TX_CMD_LIFE_TIME_INFINITE);
- cmd->tx_cmd.rate_n_flags = iwm_mvm_scan_rate_n_flags(sc, flags, 1/*XXX*/);
-
- cmd->tx_cmd.len = htole16(iwm_mvm_fill_probe_req(sc,
- (struct ieee80211_frame *)cmd->data,
- vap ? vap->iv_myaddr : ic->ic_macaddr, n_ssids,
- ssid, ssid_len, NULL, 0,
- sc->sc_capa_max_probe_len));
-
- cmd->channel_count
- = iwm_mvm_scan_fill_channels(sc, cmd, flags, n_ssids, basic_ssid);
-
- cmd->len = htole16(sizeof(struct iwm_scan_cmd) +
- le16toh(cmd->tx_cmd.len) +
- (cmd->channel_count * sizeof(struct iwm_scan_channel)));
- hcmd.len[0] = le16toh(cmd->len);
-
- status = IWM_SCAN_RESPONSE_OK;
- ret = iwm_mvm_send_cmd_status(sc, &hcmd, &status);
- if (!ret && status == IWM_SCAN_RESPONSE_OK) {
+ /* Specify the scan plan: We'll do one iteration. */
+ req->schedule[0].iterations = 1;
+ req->schedule[0].full_scan_mul = 1;
+
+ /* Disable EBS. */
+ req->channel_opt[0].non_ebs_ratio = 1;
+ req->channel_opt[1].non_ebs_ratio = 1;
+
+ ret = iwm_send_cmd(sc, &hcmd);
+ if (!ret) {
IWM_DPRINTF(sc, IWM_DEBUG_SCAN,
"Scan request was sent successfully\n");
- } else {
- /*
- * If the scan failed, it usually means that the FW was unable
- * to allocate the time events. Warn on it, but maybe we
- * should try to send the command again with different params.
- */
- ret = EIO;
}
+ free(req, M_DEVBUF);
return ret;
}
diff --git a/sys/dev/iwm/if_iwm_scan.h b/sys/dev/iwm/if_iwm_scan.h
index d753ff05c82d..a9e74cdbde06 100644
--- a/sys/dev/iwm/if_iwm_scan.h
+++ b/sys/dev/iwm/if_iwm_scan.h
@@ -106,8 +106,8 @@
#ifndef __IF_IWN_SCAN_H__
#define __IF_IWN_SCAN_H__
-extern int
-iwm_mvm_scan_request(struct iwm_softc *sc, int flags,
- int n_ssids, uint8_t *ssid, int ssid_len);
+extern int iwm_mvm_lmac_scan(struct iwm_softc *sc);
+extern int iwm_mvm_config_umac_scan(struct iwm_softc *);
+extern int iwm_mvm_umac_scan(struct iwm_softc *);
#endif /* __IF_IWN_SCAN_H__ */
diff --git a/sys/dev/iwm/if_iwm_time_event.c b/sys/dev/iwm/if_iwm_time_event.c
index 8396218e2228..706b6c62c54d 100644
--- a/sys/dev/iwm/if_iwm_time_event.c
+++ b/sys/dev/iwm/if_iwm_time_event.c
@@ -257,8 +257,7 @@ iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
htole32(IWM_FW_CMD_ID_AND_COLOR(IWM_DEFAULT_MACID, IWM_DEFAULT_COLOR));
time_cmd.id = htole32(IWM_TE_BSS_STA_AGGRESSIVE_ASSOC);
- time_cmd.apply_time = htole32(iwm_read_prph(sc,
- IWM_DEVICE_SYSTEM_TIME_REG));
+ time_cmd.apply_time = htole32(0);
time_cmd.max_frags = IWM_TE_V2_FRAG_NONE;
time_cmd.max_delay = htole32(max_delay);
@@ -268,7 +267,8 @@ iwm_mvm_protect_session(struct iwm_softc *sc, struct iwm_node *in,
time_cmd.repeat = 1;
time_cmd.policy
= htole32(IWM_TE_V2_NOTIF_HOST_EVENT_START |
- IWM_TE_V2_NOTIF_HOST_EVENT_END);
+ IWM_TE_V2_NOTIF_HOST_EVENT_END |
+ IWM_T2_V2_START_IMMEDIATELY);
iwm_mvm_time_event_send_add(sc, in, /*te_data*/NULL, &time_cmd);
}
diff --git a/sys/dev/iwm/if_iwm_util.c b/sys/dev/iwm/if_iwm_util.c
index 0f0aef8d359c..88d5744debe2 100644
--- a/sys/dev/iwm/if_iwm_util.c
+++ b/sys/dev/iwm/if_iwm_util.c
@@ -169,7 +169,7 @@ iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
{
struct iwm_tx_ring *ring = &sc->txq[IWM_MVM_CMD_QUEUE];
struct iwm_tfd *desc;
- struct iwm_tx_data *data;
+ struct iwm_tx_data *txdata = NULL;
struct iwm_device_cmd *cmd;
struct mbuf *m;
bus_dma_segment_t seg;
@@ -178,11 +178,15 @@ iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
int error = 0, i, paylen, off;
int code;
int async, wantresp;
+ int group_id;
int nsegs;
+ size_t hdrlen, datasz;
+ uint8_t *data;
code = hcmd->id;
async = hcmd->flags & IWM_CMD_ASYNC;
wantresp = hcmd->flags & IWM_CMD_WANT_SKB;
+ data = NULL;
for (i = 0, paylen = 0; i < nitems(hcmd->len); i++) {
paylen += hcmd->len[i];
@@ -207,17 +211,27 @@ iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
}
desc = &ring->desc[ring->cur];
- data = &ring->data[ring->cur];
+ txdata = &ring->data[ring->cur];
- if (paylen > sizeof(cmd->data)) {
+ group_id = iwm_cmd_groupid(code);
+ if (group_id != 0) {
+ hdrlen = sizeof(cmd->hdr_wide);
+ datasz = sizeof(cmd->data_wide);
+ } else {
+ hdrlen = sizeof(cmd->hdr);
+ datasz = sizeof(cmd->data);
+ }
+
+ if (paylen > datasz) {
IWM_DPRINTF(sc, IWM_DEBUG_CMD,
"large command paylen=%u len0=%u\n",
paylen, hcmd->len[0]);
/* Command is too large */
+ size_t totlen = hdrlen + paylen;
if (paylen > IWM_MAX_CMD_PAYLOAD_SIZE) {
device_printf(sc->sc_dev,
"firmware command too long (%zd bytes)\n",
- paylen + sizeof(cmd->hdr));
+ totlen);
error = EINVAL;
goto out;
}
@@ -229,30 +243,41 @@ iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
error = bus_dmamap_load_mbuf_sg(ring->data_dmat,
- data->map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
+ txdata->map, m, &seg, &nsegs, BUS_DMA_NOWAIT);
if (error != 0) {
device_printf(sc->sc_dev,
"%s: can't map mbuf, error %d\n", __func__, error);
m_freem(m);
goto out;
}
- data->m = m; /* mbuf will be freed in iwm_cmd_done() */
+ txdata->m = m; /* mbuf will be freed in iwm_cmd_done() */
cmd = mtod(m, struct iwm_device_cmd *);
paddr = seg.ds_addr;
} else {
cmd = &ring->cmd[ring->cur];
- paddr = data->cmd_paddr;
+ paddr = txdata->cmd_paddr;
}
- cmd->hdr.code = code;
- cmd->hdr.flags = 0;
- cmd->hdr.qid = ring->qid;
- cmd->hdr.idx = ring->cur;
+ if (group_id != 0) {
+ cmd->hdr_wide.opcode = iwm_cmd_opcode(code);
+ cmd->hdr_wide.group_id = group_id;
+ cmd->hdr_wide.qid = ring->qid;
+ cmd->hdr_wide.idx = ring->cur;
+ cmd->hdr_wide.length = htole16(paylen);
+ cmd->hdr_wide.version = iwm_cmd_version(code);
+ data = cmd->data_wide;
+ } else {
+ cmd->hdr.code = iwm_cmd_opcode(code);
+ cmd->hdr.flags = 0;
+ cmd->hdr.qid = ring->qid;
+ cmd->hdr.idx = ring->cur;
+ data = cmd->data;
+ }
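/*
 * Editor's note: sketch of the command-id packing assumed by the
 * wide-header path above (mirroring iwlwifi; the real helpers live in
 * if_iwmreg.h in this patch series):
 *
 *	id       = opcode | (group_id << 8) | (version << 16)
 *	opcode   = id & 0xff
 *	group_id = (id >> 8) & 0xff
 *	version  = (id >> 16) & 0xff
 *
 * Group 0 keeps the legacy 4-byte header; any other group selects the
 * 8-byte wide header filled in above, which carries the 16-bit payload
 * length and the command version explicitly.
 */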
for (i = 0, off = 0; i < nitems(hcmd->data); i++) {
if (hcmd->len[i] == 0)
continue;
- memcpy(cmd->data + off, hcmd->data[i], hcmd->len[i]);
+ memcpy(data + off, hcmd->data[i], hcmd->len[i]);
off += hcmd->len[i];
}
KASSERT(off == paylen, ("off %d != paylen %d", off, paylen));
@@ -261,18 +286,17 @@ iwm_send_cmd(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
addr_lo = htole32((uint32_t)paddr);
memcpy(&desc->tbs[0].lo, &addr_lo, sizeof(uint32_t));
desc->tbs[0].hi_n_len = htole16(iwm_get_dma_hi_addr(paddr)
- | ((sizeof(cmd->hdr) + paylen) << 4));
+ | ((hdrlen + paylen) << 4));
desc->num_tbs = 1;
IWM_DPRINTF(sc, IWM_DEBUG_CMD,
- "%s: iwm_send_cmd 0x%x size=%lu %s\n",
- __func__,
+ "iwm_send_cmd 0x%x size=%lu %s\n",
code,
- (unsigned long) (hcmd->len[0] + hcmd->len[1] + sizeof(cmd->hdr)),
+ (unsigned long) (hcmd->len[0] + hcmd->len[1] + hdrlen),
async ? " (async)" : "");
- if (paylen > sizeof(cmd->data)) {
- bus_dmamap_sync(ring->data_dmat, data->map,
+ if (paylen > datasz) {
+ bus_dmamap_sync(ring->data_dmat, txdata->map,
BUS_DMASYNC_PREWRITE);
} else {
bus_dmamap_sync(ring->cmd_dma.tag, ring->cmd_dma.map,
@@ -404,3 +428,31 @@ iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd)
sc->sc_wantresp = -1;
wakeup(&sc->sc_wantresp);
}
+
+uint8_t
+iwm_fw_valid_tx_ant(struct iwm_softc *sc)
+{
+ uint8_t tx_ant;
+
+ tx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN)
+ >> IWM_FW_PHY_CFG_TX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_tx_ant)
+ tx_ant &= sc->sc_nvm.valid_tx_ant;
+
+ return tx_ant;
+}
+
+uint8_t
+iwm_fw_valid_rx_ant(struct iwm_softc *sc)
+{
+ uint8_t rx_ant;
+
+ rx_ant = ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN)
+ >> IWM_FW_PHY_CFG_RX_CHAIN_POS);
+
+ if (sc->sc_nvm.valid_rx_ant)
+ rx_ant &= sc->sc_nvm.valid_rx_ant;
+
+ return rx_ant;
+}
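/*
 * Editor's note: the returned masks carry one bit per RF chain
 * (bit 0 = antenna A, bit 1 = antenna B, ...).  A small illustrative
 * consumer (hypothetical helper, not part of this commit):
 */
static int
iwm_num_valid_tx_chains(struct iwm_softc *sc)
{
	uint8_t mask = iwm_fw_valid_tx_ant(sc);
	int n = 0;

	while (mask != 0) {		/* population count of the mask */
		n += mask & 1;
		mask >>= 1;
	}
	return (n);
}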
diff --git a/sys/dev/iwm/if_iwm_util.h b/sys/dev/iwm/if_iwm_util.h
index 2d25a34ddd98..144f87ae7131 100644
--- a/sys/dev/iwm/if_iwm_util.h
+++ b/sys/dev/iwm/if_iwm_util.h
@@ -116,4 +116,7 @@ extern int iwm_mvm_send_cmd_pdu_status(struct iwm_softc *sc, uint8_t id,
uint16_t len, const void *data, uint32_t *status);
extern void iwm_free_resp(struct iwm_softc *sc, struct iwm_host_cmd *hcmd);
+extern uint8_t iwm_fw_valid_tx_ant(struct iwm_softc *sc);
+extern uint8_t iwm_fw_valid_rx_ant(struct iwm_softc *sc);
+
#endif /* __IF_IWM_UTIL_H__ */
diff --git a/sys/dev/iwm/if_iwmreg.h b/sys/dev/iwm/if_iwmreg.h
index 12cfd966d47c..e68f4d48ac3f 100644
--- a/sys/dev/iwm/if_iwmreg.h
+++ b/sys/dev/iwm/if_iwmreg.h
@@ -1,4 +1,4 @@
-/* $OpenBSD: if_iwmreg.h,v 1.3 2015/02/23 10:25:20 stsp Exp $ */
+/* $OpenBSD: if_iwmreg.h,v 1.4 2015/06/15 08:06:11 stsp Exp $ */
/* $FreeBSD$ */
/******************************************************************************
@@ -136,6 +136,9 @@
#define IWM_CSR_UCODE_DRV_GP1_CLR (0x05c)
#define IWM_CSR_UCODE_DRV_GP2 (0x060)
+#define IWM_CSR_MBOX_SET_REG (0x088)
+#define IWM_CSR_MBOX_SET_REG_OS_ALIVE 0x20
+
#define IWM_CSR_LED_REG (0x094)
#define IWM_CSR_DRAM_INT_TBL_REG (0x0A0)
#define IWM_CSR_MAC_SHADOW_REG_CTRL (0x0A8) /* 6000 and up */
@@ -182,6 +185,8 @@
#define IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define IWM_CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define IWM_CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
+#define IWM_CSR_HW_IF_CONFIG_REG_ENABLE_PME (0x10000000)
+#define IWM_CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define IWM_CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int*/
#define IWM_CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec*/
@@ -308,6 +313,7 @@
#define IWM_CSR_HW_REV_TYPE_2x00 (0x0000100)
#define IWM_CSR_HW_REV_TYPE_105 (0x0000110)
#define IWM_CSR_HW_REV_TYPE_135 (0x0000120)
+#define IWM_CSR_HW_REV_TYPE_7265D (0x0000210)
#define IWM_CSR_HW_REV_TYPE_NONE (0x00001F0)
/* EEPROM REG */
@@ -402,6 +408,7 @@
/* DRAM INT TABLE */
#define IWM_CSR_DRAM_INT_TBL_ENABLE (1 << 31)
+#define IWM_CSR_DRAM_INIT_TBL_WRITE_POINTER (1 << 28)
#define IWM_CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/* SECURE boot registers */
@@ -421,19 +428,37 @@ enum iwm_secure_boot_status_reg {
IWM_CSR_SECURE_BOOT_CPU_STATUS_SIGN_VERF_FAIL = 0x00000010,
};
-#define IWM_CSR_UCODE_LOAD_STATUS_ADDR (0x100)
+#define IWM_FH_UCODE_LOAD_STATUS 0x1af0
+#define IWM_CSR_UCODE_LOAD_STATUS_ADDR 0x1e70
enum iwm_secure_load_status_reg {
- IWM_CSR_CPU_STATUS_LOADING_STARTED = 0x00000001,
- IWM_CSR_CPU_STATUS_LOADING_COMPLETED = 0x00000002,
- IWM_CSR_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
- IWM_CSR_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
+ IWM_LMPM_CPU_UCODE_LOADING_STARTED = 0x00000001,
+ IWM_LMPM_CPU_HDRS_LOADING_COMPLETED = 0x00000003,
+ IWM_LMPM_CPU_UCODE_LOADING_COMPLETED = 0x00000007,
+ IWM_LMPM_CPU_STATUS_NUM_OF_LAST_COMPLETED = 0x000000F8,
+ IWM_LMPM_CPU_STATUS_NUM_OF_LAST_LOADED_BLOCK = 0x0000FF00,
};
+#define IWM_FH_MEM_TB_MAX_LENGTH 0x20000
+
+#define IWM_LMPM_SECURE_INSPECTOR_CODE_ADDR 0x1e38
+#define IWM_LMPM_SECURE_INSPECTOR_DATA_ADDR 0x1e3c
+#define IWM_LMPM_SECURE_UCODE_LOAD_CPU1_HDR_ADDR 0x1e78
+#define IWM_LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR 0x1e7c
-#define IWM_CSR_SECURE_INSPECTOR_CODE_ADDR (0x100)
-#define IWM_CSR_SECURE_INSPECTOR_DATA_ADDR (0x100)
+#define IWM_LMPM_SECURE_INSPECTOR_CODE_MEM_SPACE 0x400000
+#define IWM_LMPM_SECURE_INSPECTOR_DATA_MEM_SPACE 0x402000
+#define IWM_LMPM_SECURE_CPU1_HDR_MEM_SPACE 0x420000
+#define IWM_LMPM_SECURE_CPU2_HDR_MEM_SPACE 0x420400
#define IWM_CSR_SECURE_TIME_OUT (100)
+/* extended range in FW SRAM */
+#define IWM_FW_MEM_EXTENDED_START 0x40000
+#define IWM_FW_MEM_EXTENDED_END 0x57FFF
+
+/* FW chicken bits */
+#define IWM_LMPM_CHICK 0xa01ff8
+#define IWM_LMPM_CHICK_EXTENDED_ADDR_SPACE 0x01
+
#define IWM_FH_TCSR_0_REG0 (0x1D00)
/*
@@ -484,6 +509,32 @@ enum iwm_secure_load_status_reg {
#define IWM_HBUS_TARG_PRPH_WDAT (IWM_HBUS_BASE+0x04c)
#define IWM_HBUS_TARG_PRPH_RDAT (IWM_HBUS_BASE+0x050)
+/* enable the ID buf for read */
+#define IWM_WFPM_PS_CTL_CLR 0xa0300c
+#define IWM_WFMP_MAC_ADDR_0 0xa03080
+#define IWM_WFMP_MAC_ADDR_1 0xa03084
+#define IWM_LMPM_PMG_EN 0xa01cec
+#define IWM_RADIO_REG_SYS_MANUAL_DFT_0 0xad4078
+#define IWM_RFIC_REG_RD 0xad0470
+#define IWM_WFPM_CTRL_REG 0xa03030
+#define IWM_WFPM_AUX_CTL_AUX_IF_MAC_OWNER_MSK 0x08000000
+#define IWM_ENABLE_WFPM 0x80000000
+
+#define IWM_AUX_MISC_REG 0xa200b0
+#define IWM_HW_STEP_LOCATION_BITS 24
+
+#define IWM_AUX_MISC_MASTER1_EN 0xa20818
+#define IWM_AUX_MISC_MASTER1_EN_SBE_MSK 0x1
+#define IWM_AUX_MISC_MASTER1_SMPHR_STATUS 0xa20800
+#define IWM_RSA_ENABLE 0xa24b08
+#define IWM_PREG_AUX_BUS_WPROT_0 0xa04cc0
+#define IWM_SB_CFG_OVERRIDE_ADDR 0xa26c78
+#define IWM_SB_CFG_OVERRIDE_ENABLE 0x8000
+#define IWM_SB_CFG_BASE_OVERRIDE 0xa20000
+#define IWM_SB_MODIFY_CFG_FLAG 0xa03088
+#define IWM_SB_CPU_1_STATUS 0xa01e30
+#define IWM_SB_CPU_2_STATUS			0xa01e34
+
/* Used to enable DBGM */
#define IWM_HBUS_TARG_TEST_REG (IWM_HBUS_BASE+0x05c)
@@ -567,7 +618,12 @@ enum iwm_dtd_diode_reg {
* containing CAM (Continuous Active Mode) indication.
* @IWM_UCODE_TLV_FLAGS_P2P_PS: P2P client power save is supported (only on a
* single bound interface).
+ * @IWM_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
+ * @IWM_UCODE_TLV_FLAGS_EBS_SUPPORT: this uCode image supports EBS.
* @IWM_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
+ * @IWM_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
+ * @IWM_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
+ *
*/
enum iwm_ucode_tlv_flag {
IWM_UCODE_TLV_FLAGS_PAN = (1 << 0),
@@ -590,8 +646,150 @@ enum iwm_ucode_tlv_flag {
IWM_UCODE_TLV_FLAGS_STA_KEY_CMD = (1 << 19),
IWM_UCODE_TLV_FLAGS_DEVICE_PS_CMD = (1 << 20),
IWM_UCODE_TLV_FLAGS_P2P_PS = (1 << 21),
+ IWM_UCODE_TLV_FLAGS_BSS_P2P_PS_DCM = (1 << 22),
+ IWM_UCODE_TLV_FLAGS_BSS_P2P_PS_SCM = (1 << 23),
IWM_UCODE_TLV_FLAGS_UAPSD_SUPPORT = (1 << 24),
+ IWM_UCODE_TLV_FLAGS_EBS_SUPPORT = (1 << 25),
IWM_UCODE_TLV_FLAGS_P2P_PS_UAPSD = (1 << 26),
+ IWM_UCODE_TLV_FLAGS_BCAST_FILTERING = (1 << 29),
+ IWM_UCODE_TLV_FLAGS_GO_UAPSD = (1 << 30),
+ IWM_UCODE_TLV_FLAGS_LTE_COEX = (1 << 31),
+};
+
+#define IWM_UCODE_TLV_FLAG_BITS \
+	"\020\1PAN\2NEWSCAN\3MFP\4P2P\5DW_BC_TABLE\6NEWBT_COEX\7PM_CMD" \
+	"\10SHORT_BL\11RX_ENERGY\12TIME_EVENT_V2\13D3_6_IPV6\14BF_UPDATED" \
+	"\15NO_BASIC_SSID\17D3_CONTINUITY\20NEW_NSOFFL_S\21NEW_NSOFFL_L" \
+	"\22SCHED_SCAN\24STA_KEY_CMD\25DEVICE_PS_CMD\26P2P_PS\27P2P_PS_DCM" \
+	"\30P2P_PS_SCM\31UAPSD_SUPPORT\32EBS\33P2P_PS_UAPSD" \
+	"\36BCAST_FILTERING\37GO_UAPSD\40LTE_COEX"
+
+/**
+ * enum iwm_ucode_tlv_api - ucode api
+ * @IWM_UCODE_TLV_API_FRAGMENTED_SCAN: This ucode supports active dwell time
+ * longer than the passive one, which is essential for fragmented scan.
+ * @IWM_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
+ * @IWM_UCODE_TLV_API_WIDE_CMD_HDR: ucode supports wide command header
+ * @IWM_UCODE_TLV_API_LQ_SS_PARAMS: Configure STBC/BFER via LQ CMD ss_params
+ * @IWM_UCODE_TLV_API_EXT_SCAN_PRIORITY: scan APIs use 8-level priority
+ * instead of 3.
+ * @IWM_UCODE_TLV_API_TX_POWER_CHAIN: TX power API has larger command size
+ * (command version 3) that supports per-chain limits
+ *
+ * @IWM_NUM_UCODE_TLV_API: number of bits used
+ */
+enum iwm_ucode_tlv_api {
+ IWM_UCODE_TLV_API_FRAGMENTED_SCAN = (1 << 8),
+ IWM_UCODE_TLV_API_WIFI_MCC_UPDATE = (1 << 9),
+ IWM_UCODE_TLV_API_WIDE_CMD_HDR = (1 << 14),
+ IWM_UCODE_TLV_API_LQ_SS_PARAMS = (1 << 18),
+ IWM_UCODE_TLV_API_EXT_SCAN_PRIORITY = (1 << 24),
+ IWM_UCODE_TLV_API_TX_POWER_CHAIN = (1 << 27),
+
+ IWM_NUM_UCODE_TLV_API = 32
+};
+
+#define IWM_UCODE_TLV_API_BITS \
+ "\020\11FRAGMENTED_SCAN\12WIFI_MCC_UPDATE\17WIDE_CMD_HDR\23LQ_SS_PARAMS\31EXT_SCAN_PRIO\34TX_POWER_CHAIN"
+
+/**
+ * enum iwm_ucode_tlv_capa - ucode capabilities
+ * @IWM_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
+ * @IWM_UCODE_TLV_CAPA_LAR_SUPPORT: supports Location Aware Regulatory
+ * @IWM_UCODE_TLV_CAPA_UMAC_SCAN: supports UMAC scan.
+ * @IWM_UCODE_TLV_CAPA_BEAMFORMER: supports Beamformer
+ * @IWM_UCODE_TLV_CAPA_TOF_SUPPORT: supports Time of Flight (802.11mc FTM)
+ * @IWM_UCODE_TLV_CAPA_TDLS_SUPPORT: support basic TDLS functionality
+ * @IWM_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT: supports insertion of current
+ * tx power value into TPC Report action frame and Link Measurement Report
+ * action frame
+ * @IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT: supports updating current
+ * channel in DS parameter set element in probe requests.
+ * @IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT: supports adding TPC Report IE in
+ * probe requests.
+ * @IWM_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
+ * @IWM_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
+ * which also implies support for the scheduler configuration command
+ * @IWM_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH: supports TDLS channel switching
+ * @IWM_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG: Consolidated D3-D0 image
+ * @IWM_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
+ * @IWM_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT: supports DC2DC Command
+ * @IWM_UCODE_TLV_CAPA_2G_COEX_SUPPORT: supports 2G coex Command
+ * @IWM_UCODE_TLV_CAPA_CSUM_SUPPORT: supports TCP Checksum Offload
+ * @IWM_UCODE_TLV_CAPA_RADIO_BEACON_STATS: support radio and beacon statistics
+ * @IWM_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD: support p2p standalone U-APSD
+ * @IWM_UCODE_TLV_CAPA_BT_COEX_PLCR: enabled BT Coex packet level co-running
+ * @IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC: ucode supports LAR updates with different
+ * sources for the MCC. This TLV bit is a future replacement to
+ * IWM_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
+ * is supported.
+ * @IWM_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
+ * @IWM_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWM_UCODE_TLV_CAPA_NAN_SUPPORT: supports NAN
+ * @IWM_UCODE_TLV_CAPA_UMAC_UPLOAD: supports upload mode in umac (1=supported,
+ * 0=no support)
+ * @IWM_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE: extended DTS measurement
+ * @IWM_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS: supports short PM timeouts
+ * @IWM_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT: supports bt-coex Multi-priority LUT
+ * @IWM_UCODE_TLV_CAPA_BEACON_ANT_SELECTION: firmware will decide on what
+ * antenna the beacon should be transmitted
+ * @IWM_UCODE_TLV_CAPA_BEACON_STORING: firmware will store the latest beacon
+ * from AP and will send it upon d0i3 exit.
+ * @IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2: support LAR API V2
+ * @IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW: firmware responsible for CT-kill
+ * @IWM_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT: supports temperature
+ * thresholds reporting
+ * @IWM_UCODE_TLV_CAPA_CTDP_SUPPORT: supports cTDP command
+ * @IWM_UCODE_TLV_CAPA_USNIFFER_UNIFIED: supports usniffer enabled in
+ * regular image.
+ * @IWM_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG: support getting more shared
+ * memory addresses from the firmware.
+ * @IWM_UCODE_TLV_CAPA_LQM_SUPPORT: supports Link Quality Measurement
+ * @IWM_UCODE_TLV_CAPA_LMAC_UPLOAD: supports upload mode in lmac (1=supported,
+ * 0=no support)
+ *
+ * @IWM_NUM_UCODE_TLV_CAPA: number of bits used
+ */
+enum iwm_ucode_tlv_capa {
+ IWM_UCODE_TLV_CAPA_D0I3_SUPPORT = 0,
+ IWM_UCODE_TLV_CAPA_LAR_SUPPORT = 1,
+ IWM_UCODE_TLV_CAPA_UMAC_SCAN = 2,
+ IWM_UCODE_TLV_CAPA_BEAMFORMER = 3,
+ IWM_UCODE_TLV_CAPA_TOF_SUPPORT = 5,
+ IWM_UCODE_TLV_CAPA_TDLS_SUPPORT = 6,
+ IWM_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT = 8,
+ IWM_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT = 9,
+ IWM_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = 10,
+ IWM_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = 11,
+ IWM_UCODE_TLV_CAPA_DQA_SUPPORT = 12,
+ IWM_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH = 13,
+ IWM_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG = 17,
+ IWM_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = 18,
+ IWM_UCODE_TLV_CAPA_DC2DC_CONFIG_SUPPORT = 19,
+ IWM_UCODE_TLV_CAPA_2G_COEX_SUPPORT = 20,
+ IWM_UCODE_TLV_CAPA_CSUM_SUPPORT = 21,
+ IWM_UCODE_TLV_CAPA_RADIO_BEACON_STATS = 22,
+ IWM_UCODE_TLV_CAPA_P2P_STANDALONE_UAPSD = 26,
+ IWM_UCODE_TLV_CAPA_BT_COEX_PLCR = 28,
+ IWM_UCODE_TLV_CAPA_LAR_MULTI_MCC = 29,
+ IWM_UCODE_TLV_CAPA_BT_COEX_RRC = 30,
+ IWM_UCODE_TLV_CAPA_GSCAN_SUPPORT = 31,
+ IWM_UCODE_TLV_CAPA_NAN_SUPPORT = 34,
+ IWM_UCODE_TLV_CAPA_UMAC_UPLOAD = 35,
+ IWM_UCODE_TLV_CAPA_EXTENDED_DTS_MEASURE = 64,
+ IWM_UCODE_TLV_CAPA_SHORT_PM_TIMEOUTS = 65,
+ IWM_UCODE_TLV_CAPA_BT_MPLUT_SUPPORT = 67,
+ IWM_UCODE_TLV_CAPA_MULTI_QUEUE_RX_SUPPORT = 68,
+ IWM_UCODE_TLV_CAPA_BEACON_ANT_SELECTION = 71,
+ IWM_UCODE_TLV_CAPA_BEACON_STORING = 72,
+ IWM_UCODE_TLV_CAPA_LAR_SUPPORT_V2 = 73,
+ IWM_UCODE_TLV_CAPA_CT_KILL_BY_FW = 74,
+ IWM_UCODE_TLV_CAPA_TEMP_THS_REPORT_SUPPORT = 75,
+ IWM_UCODE_TLV_CAPA_CTDP_SUPPORT = 76,
+ IWM_UCODE_TLV_CAPA_USNIFFER_UNIFIED = 77,
+ IWM_UCODE_TLV_CAPA_LMAC_UPLOAD = 79,
+ IWM_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG = 80,
+ IWM_UCODE_TLV_CAPA_LQM_SUPPORT = 81,
+
+ IWM_NUM_UCODE_TLV_CAPA = 128
};
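These capability values are bit indices into a 128-bit bitmap, not mask values, which is why they run past 31 (the bitmap is stored as sc_enabled_capa[] later in this merge). A hedged sketch of testing one with the setbit()/isset() macros from <sys/param.h>; the local array here is a stand-in, not the driver's field:

#include <sys/param.h>

static int
example_capa_enabled(void)
{
	uint8_t capa[howmany(IWM_NUM_UCODE_TLV_CAPA, NBBY)] = { 0 };

	/* Record and test one capability bit by its index. */
	setbit(capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN);
	return (isset(capa, IWM_UCODE_TLV_CAPA_UMAC_SCAN) != 0);
}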
/* The default calibration table size if not specified by firmware file */
@@ -617,8 +815,8 @@ enum iwm_ucode_sec {
* For 16.0 uCode and above, there is no differentiation between sections,
* just an offset to the HW address.
*/
-#define IWM_UCODE_SECTION_MAX 6
-#define IWM_UCODE_FIRST_SECTION_OF_SECOND_CPU (IWM_UCODE_SECTION_MAX/2)
+#define IWM_CPU1_CPU2_SEPARATOR_SECTION 0xFFFFCCCC
+#define IWM_PAGING_SEPARATOR_SECTION 0xAAAABBBB
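These two values are sentinels rather than real section offsets: an image walker treats them as markers splitting CPU1 from CPU2 sections and delimiting the paging block. An illustrative check under that assumption (the function name is hypothetical):

static int
iwm_offset_is_separator(uint32_t offset)
{
	/* Sentinel offsets delimit CPU sections and the paging block. */
	return (offset == IWM_CPU1_CPU2_SEPARATOR_SECTION ||
	    offset == IWM_PAGING_SEPARATOR_SECTION);
}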
/* uCode version contains 4 values: Major/Minor/API/Serial */
#define IWM_UCODE_MAJOR(ver) (((ver) & 0xFF000000) >> 24)
@@ -765,7 +963,17 @@ enum iwm_ucode_tlv_type {
* handling ucode version 9.
*/
IWM_UCODE_TLV_API_CHANGES_SET = 29,
- IWM_UCODE_TLV_ENABLED_CAPABILITIES = 30
+ IWM_UCODE_TLV_ENABLED_CAPABILITIES = 30,
+
+ IWM_UCODE_TLV_N_SCAN_CHANNELS = 31,
+ IWM_UCODE_TLV_PAGING = 32,
+ IWM_UCODE_TLV_SEC_RT_USNIFFER = 34,
+ IWM_UCODE_TLV_SDIO_ADMA_ADDR = 35,
+ IWM_UCODE_TLV_FW_VERSION = 36,
+ IWM_UCODE_TLV_FW_DBG_DEST = 38,
+ IWM_UCODE_TLV_FW_DBG_CONF = 39,
+ IWM_UCODE_TLV_FW_DBG_TRIGGER = 40,
+ IWM_UCODE_TLV_FW_GSCAN_CAPA = 50,
};
struct iwm_ucode_tlv {
@@ -774,6 +982,16 @@ struct iwm_ucode_tlv {
uint8_t data[0];
};
+struct iwm_ucode_api {
+ uint32_t api_index;
+ uint32_t api_flags;
+} __packed;
+
+struct iwm_ucode_capa {
+ uint32_t api_index;
+ uint32_t api_capa;
+} __packed;
+
#define IWM_TLV_UCODE_MAGIC 0x0a4c5749
struct iwm_tlv_ucode_header {
@@ -846,7 +1064,19 @@ struct iwm_tlv_ucode_header {
#define IWM_DEVICE_SYSTEM_TIME_REG 0xA0206C
/* Device NMI register */
-#define IWM_DEVICE_SET_NMI_REG 0x00a01c30
+#define IWM_DEVICE_SET_NMI_REG 0x00a01c30
+#define IWM_DEVICE_SET_NMI_VAL_HW 0x01
+#define IWM_DEVICE_SET_NMI_VAL_DRV 0x80
+#define IWM_DEVICE_SET_NMI_8000_REG 0x00a01c24
+#define IWM_DEVICE_SET_NMI_8000_VAL 0x1000000
+
+/*
+ * Device reset for family 8000
+ * write to bit 24 in order to reset the CPU
+ */
+#define IWM_RELEASE_CPU_RESET 0x300c
+#define IWM_RELEASE_CPU_RESET_BIT 0x1000000
+
/*****************************************************************************
* 7000/3000 series SHR DTS addresses *
@@ -960,6 +1190,8 @@ struct iwm_tlv_ucode_header {
#define IWM_SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK (0x0000007F)
#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS (16)
#define IWM_SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK (0x007F0000)
+#define IWM_SCD_GP_CTRL_ENABLE_31_QUEUES (1 << 0)
+#define IWM_SCD_GP_CTRL_AUTO_ACTIVE_MODE (1 << 18)
/* Context Data */
#define IWM_SCD_CONTEXT_MEM_LOWER_BOUND (IWM_SCD_MEM_LOWER_BOUND + 0x600)
@@ -993,6 +1225,8 @@ struct iwm_tlv_ucode_header {
#define IWM_SCD_CHAINEXT_EN (IWM_SCD_BASE + 0x244)
#define IWM_SCD_AGGR_SEL (IWM_SCD_BASE + 0x248)
#define IWM_SCD_INTERRUPT_MASK (IWM_SCD_BASE + 0x108)
+#define IWM_SCD_GP_CTRL (IWM_SCD_BASE + 0x1a8)
+#define IWM_SCD_EN_CTRL (IWM_SCD_BASE + 0x254)
static inline unsigned int IWM_SCD_QUEUE_WRPTR(unsigned int chnl)
{
@@ -1510,13 +1744,14 @@ struct iwm_agn_scd_bc_tbl {
* BEGIN mvm/fw-api.h
*/
-/* maximal number of Tx queues in any platform */
-#define IWM_MVM_MAX_QUEUES 20
+/* Maximum number of Tx queues. */
+#define IWM_MVM_MAX_QUEUES 31
/* Tx queue numbers */
enum {
IWM_MVM_OFFCHANNEL_QUEUE = 8,
IWM_MVM_CMD_QUEUE = 9,
+ IWM_MVM_AUX_QUEUE = 15,
};
enum iwm_mvm_tx_fifo {
@@ -1541,6 +1776,13 @@ enum {
IWM_PHY_CONTEXT_CMD = 0x8,
IWM_DBG_CFG = 0x9,
+ /* UMAC scan commands */
+ IWM_SCAN_ITERATION_COMPLETE_UMAC = 0xb5,
+ IWM_SCAN_CFG_CMD = 0xc,
+ IWM_SCAN_REQ_UMAC = 0xd,
+ IWM_SCAN_ABORT_UMAC = 0xe,
+ IWM_SCAN_COMPLETE_UMAC = 0xf,
+
/* station table */
IWM_ADD_STA_KEY = 0x17,
IWM_ADD_STA = 0x18,
@@ -1551,6 +1793,9 @@ enum {
IWM_TXPATH_FLUSH = 0x1e,
IWM_MGMT_MCAST_KEY = 0x1f,
+ /* scheduler config */
+ IWM_SCD_QUEUE_CFG = 0x1d,
+
/* global key */
IWM_WEP_KEY = 0x20,
@@ -1574,10 +1819,12 @@ enum {
/* Scan offload */
IWM_SCAN_OFFLOAD_REQUEST_CMD = 0x51,
IWM_SCAN_OFFLOAD_ABORT_CMD = 0x52,
- IWM_SCAN_OFFLOAD_COMPLETE = 0x6D,
- IWM_SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6E,
+ IWM_HOT_SPOT_CMD = 0x53,
+ IWM_SCAN_OFFLOAD_COMPLETE = 0x6d,
+ IWM_SCAN_OFFLOAD_UPDATE_PROFILES_CMD = 0x6e,
IWM_SCAN_OFFLOAD_CONFIG_CMD = 0x6f,
IWM_MATCH_FOUND_NOTIFICATION = 0xd9,
+ IWM_SCAN_ITERATION_COMPLETE = 0xe7,
/* Phy */
IWM_PHY_CONFIGURATION_CMD = 0x6a,
@@ -1616,6 +1863,8 @@ enum {
IWM_MISSED_BEACONS_NOTIFICATION = 0xa2,
+ IWM_MFUART_LOAD_NOTIFICATION = 0xb1,
+
/* Power - new power table command */
IWM_MAC_PM_POWER_TABLE = 0xa9,
@@ -1623,6 +1872,10 @@ enum {
IWM_REPLY_RX_MPDU_CMD = 0xc1,
IWM_BA_NOTIF = 0xc5,
+ /* Location Aware Regulatory */
+ IWM_MCC_UPDATE_CMD = 0xc8,
+ IWM_MCC_CHUB_UPDATE_CMD = 0xc9,
+
/* BT Coex */
IWM_BT_COEX_PRIO_TABLE = 0xcc,
IWM_BT_COEX_PROT_ENV = 0xcd,
@@ -1632,6 +1885,10 @@ enum {
IWM_REPLY_SF_CFG_CMD = 0xd1,
IWM_REPLY_BEACON_FILTERING_CMD = 0xd2,
+ /* DTS measurements */
+ IWM_CMD_DTS_MEASUREMENT_TRIGGER = 0xdc,
+ IWM_DTS_MEASUREMENT_NOTIFICATION = 0xdd,
+
IWM_REPLY_DEBUG_CMD = 0xf0,
IWM_DEBUG_LOG_MSG = 0xf7,
@@ -1800,10 +2057,14 @@ enum {
IWM_NVM_SECTION_TYPE_HW = 0,
IWM_NVM_SECTION_TYPE_SW,
IWM_NVM_SECTION_TYPE_PAPD,
- IWM_NVM_SECTION_TYPE_BT,
+ IWM_NVM_SECTION_TYPE_REGULATORY,
IWM_NVM_SECTION_TYPE_CALIBRATION,
IWM_NVM_SECTION_TYPE_PRODUCTION,
IWM_NVM_SECTION_TYPE_POST_FCS_CALIB,
+ /* 7, 8, 9 unknown */
+ IWM_NVM_SECTION_TYPE_HW_8000 = 10,
+ IWM_NVM_SECTION_TYPE_MAC_OVERRIDE,
+ IWM_NVM_SECTION_TYPE_PHY_SKU,
IWM_NVM_NUM_OF_SECTIONS,
};
@@ -1874,7 +2135,7 @@ enum {
#define IWM_ALIVE_FLG_RFKILL (1 << 0)
-struct iwm_mvm_alive_resp {
+struct iwm_mvm_alive_resp_v1 {
uint16_t status;
uint16_t flags;
uint8_t ucode_minor;
@@ -1896,6 +2157,59 @@ struct iwm_mvm_alive_resp {
uint32_t scd_base_ptr; /* SRAM address for SCD */
} __packed; /* IWM_ALIVE_RES_API_S_VER_1 */
+struct iwm_mvm_alive_resp_v2 {
+ uint16_t status;
+ uint16_t flags;
+ uint8_t ucode_minor;
+ uint8_t ucode_major;
+ uint16_t id;
+ uint8_t api_minor;
+ uint8_t api_major;
+ uint8_t ver_subtype;
+ uint8_t ver_type;
+ uint8_t mac;
+ uint8_t opt;
+ uint16_t reserved2;
+ uint32_t timestamp;
+ uint32_t error_event_table_ptr; /* SRAM address for error log */
+ uint32_t log_event_table_ptr; /* SRAM address for LMAC event log */
+ uint32_t cpu_register_ptr;
+ uint32_t dbgm_config_ptr;
+ uint32_t alive_counter_ptr;
+ uint32_t scd_base_ptr; /* SRAM address for SCD */
+ uint32_t st_fwrd_addr; /* pointer to Store and forward */
+ uint32_t st_fwrd_size;
+ uint8_t umac_minor; /* UMAC version: minor */
+ uint8_t umac_major; /* UMAC version: major */
+ uint16_t umac_id; /* UMAC version: id */
+ uint32_t error_info_addr; /* SRAM address for UMAC error log */
+ uint32_t dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_2 */
+
+struct iwm_mvm_alive_resp_v3 {
+ uint16_t status;
+ uint16_t flags;
+ uint32_t ucode_minor;
+ uint32_t ucode_major;
+ uint8_t ver_subtype;
+ uint8_t ver_type;
+ uint8_t mac;
+ uint8_t opt;
+ uint32_t timestamp;
+ uint32_t error_event_table_ptr; /* SRAM address for error log */
+ uint32_t log_event_table_ptr; /* SRAM address for LMAC event log */
+ uint32_t cpu_register_ptr;
+ uint32_t dbgm_config_ptr;
+ uint32_t alive_counter_ptr;
+ uint32_t scd_base_ptr; /* SRAM address for SCD */
+ uint32_t st_fwrd_addr; /* pointer to Store and forward */
+ uint32_t st_fwrd_size;
+ uint32_t umac_minor; /* UMAC version: minor */
+ uint32_t umac_major; /* UMAC version: major */
+ uint32_t error_info_addr; /* SRAM address for UMAC error log */
+ uint32_t dbg_print_buff_addr;
+} __packed; /* ALIVE_RES_API_S_VER_3 */
+
/* Error response/notification */
enum {
IWM_FW_ERR_UNKNOWN_CMD = 0x0,
@@ -2055,6 +2369,7 @@ enum {
IWM_TE_V1_NOTIF_HOST_FRAG_END = (1 << 5),
IWM_TE_V1_NOTIF_INTERNAL_FRAG_START = (1 << 6),
IWM_TE_V1_NOTIF_INTERNAL_FRAG_END = (1 << 7),
+ IWM_TE_V2_START_IMMEDIATELY = (1 << 11),
}; /* IWM_MAC_EVENT_ACTION_API_E_VER_2 */
@@ -2608,6 +2923,21 @@ struct iwm_missed_beacons_notif {
} __packed; /* IWM_MISSED_BEACON_NTFY_API_S_VER_3 */
/**
+ * struct iwm_mfuart_load_notif - mfuart image version & status
+ * ( IWM_MFUART_LOAD_NOTIFICATION = 0xb1 )
+ * @installed_ver: installed image version
+ * @external_ver: external image version
+ * @status: MFUART loading status
+ * @duration: MFUART loading time
+ */
+struct iwm_mfuart_load_notif {
+ uint32_t installed_ver;
+ uint32_t external_ver;
+ uint32_t status;
+ uint32_t duration;
+} __packed; /* MFU_LOADER_NTFY_API_S_VER_1 */
+
+/**
* struct iwm_set_calib_default_cmd - set default value for calibration.
* ( IWM_SET_CALIB_DEFAULT_CMD = 0x8e )
* @calib_index: the calibration to set value for
@@ -2884,6 +3214,18 @@ enum iwm_sf_scenario {
#define IWM_SF_W_MARK_LEGACY 4096
#define IWM_SF_W_MARK_SCAN 4096
+/* SF Scenarios timers for default configuration (aligned to 32 uSec) */
+#define IWM_SF_SINGLE_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWM_SF_SINGLE_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWM_SF_AGG_UNICAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWM_SF_AGG_UNICAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWM_SF_MCAST_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWM_SF_MCAST_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWM_SF_BA_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWM_SF_BA_AGING_TIMER_DEF 400 /* 0.4 mSec */
+#define IWM_SF_TX_RE_IDLE_TIMER_DEF 160 /* 150 uSec */
+#define IWM_SF_TX_RE_AGING_TIMER_DEF 400 /* 0.4 mSec */
+
/* SF Scenarios timers for FULL_ON state (aligned to 32 uSec) */
#define IWM_SF_SINGLE_UNICAST_IDLE_TIMER 320 /* 300 uSec */
#define IWM_SF_SINGLE_UNICAST_AGING_TIMER 2016 /* 2 mSec */
@@ -2898,6 +3240,8 @@ enum iwm_sf_scenario {
#define IWM_SF_LONG_DELAY_AGING_TIMER 1000000 /* 1 Sec */
+#define IWM_SF_CFG_DUMMY_NOTIF_OFF (1 << 16)
+
/**
* Smart Fifo configuration command.
* @state: smart fifo state, types listed in iwm_sf_state.
@@ -3900,6 +4244,18 @@ enum iwm_tx_flags {
IWM_TX_CMD_FLG_HCCA_CHUNK = (1 << 31)
}; /* IWM_TX_FLAGS_BITS_API_S_VER_1 */
+/**
+ * enum iwm_tx_pm_timeouts - pm timeout values in TX command
+ * @IWM_PM_FRAME_NONE: no need to suspend sleep mode
+ * @IWM_PM_FRAME_MGMT: fw suspend sleep mode for 100TU
+ * @IWM_PM_FRAME_ASSOC: fw suspend sleep mode for 10sec
+ */
+enum iwm_tx_pm_timeouts {
+ IWM_PM_FRAME_NONE = 0,
+ IWM_PM_FRAME_MGMT = 2,
+ IWM_PM_FRAME_ASSOC = 3,
+};
+
/*
* TX command security control
*/
@@ -4366,6 +4722,46 @@ static inline uint32_t iwm_mvm_get_scd_ssn(struct iwm_mvm_tx_resp *tx_resp)
* BEGIN mvm/fw-api-scan.h
*/
+/**
+ * struct iwm_scd_txq_cfg_cmd - New txq hw scheduler config command
+ * @token: opaque token, echoed back in the response
+ * @sta_id: station id
+ * @tid: traffic ID (TID)
+ * @scd_queue: scheduler queue to config
+ * @enable: 1 queue enable, 0 queue disable
+ * @aggregate: 1 aggregated queue, 0 otherwise
+ * @tx_fifo: %enum iwm_mvm_tx_fifo
+ * @window: BA window size
+ * @ssn: SSN for the BA agreement
+ */
+struct iwm_scd_txq_cfg_cmd {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+ uint8_t enable;
+ uint8_t aggregate;
+ uint8_t tx_fifo;
+ uint8_t window;
+ uint16_t ssn;
+ uint16_t reserved;
+} __packed; /* SCD_QUEUE_CFG_CMD_API_S_VER_1 */
+
+/**
+ * struct iwm_scd_txq_cfg_rsp
+ * @token: taken from the command
+ * @sta_id: station id from the command
+ * @tid: tid from the command
+ * @scd_queue: scd_queue from the command
+ */
+struct iwm_scd_txq_cfg_rsp {
+ uint8_t token;
+ uint8_t sta_id;
+ uint8_t tid;
+ uint8_t scd_queue;
+} __packed; /* SCD_QUEUE_CFG_RSP_API_S_VER_1 */
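A minimal sketch of filling the enable variant of the command for one station/TID pair. The helper name and concrete values are illustrative only, and memset() is assumed available (libkern in the kernel, <string.h> otherwise):

static void
example_fill_txq_cfg(struct iwm_scd_txq_cfg_cmd *cmd, uint8_t sta_id,
    uint8_t tid, uint8_t queue, uint8_t fifo)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->sta_id = sta_id;
	cmd->tid = tid;
	cmd->scd_queue = queue;
	cmd->enable = 1;	/* 1 = enable, 0 = disable */
	cmd->aggregate = 0;	/* not an aggregation queue */
	cmd->tx_fifo = fifo;	/* one of enum iwm_mvm_tx_fifo */
	cmd->window = 64;	/* illustrative BA window size */
}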
+
+
/* Scan Commands, Responses, Notifications */
/* Masks for iwm_scan_channel.type flags */
@@ -4430,6 +4826,23 @@ struct iwm_ssid_ie {
uint8_t ssid[IEEE80211_NWID_LEN];
} __packed; /* IWM_SCAN_DIRECT_SSID_IE_API_S_VER_1 */
+/* scan offload */
+#define IWM_MAX_SCAN_CHANNELS 40
+#define IWM_SCAN_MAX_BLACKLIST_LEN 64
+#define IWM_SCAN_SHORT_BLACKLIST_LEN 16
+#define IWM_SCAN_MAX_PROFILES 11
+#define IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
+
+/* Default watchdog (in MS) for scheduled scan iteration */
+#define IWM_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
+
+#define IWM_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
+#define IWM_CAN_ABORT_STATUS 1
+
+#define IWM_FULL_SCAN_MULTIPLIER 5
+#define IWM_FAST_SCHED_SCAN_ITERATIONS 3
+#define IWM_MAX_SCHED_SCAN_PLANS 2
+
/**
* iwm_scan_flags - masks for scan command flags
*@IWM_SCAN_FLAGS_PERIODIC_SCAN:
@@ -4473,64 +4886,194 @@ enum iwm_scan_type {
#define IWM_MAX_NUM_SCAN_CHANNELS 0x24
/**
- * struct iwm_scan_cmd - scan request command
- * ( IWM_SCAN_REQUEST_CMD = 0x80 )
- * @len: command length in bytes
- * @scan_flags: scan flags from IWM_SCAN_FLAGS_*
- * @channel_count: num of channels in channel list (1 - IWM_MAX_NUM_SCAN_CHANNELS)
- * @quiet_time: in msecs, dwell this time for active scan on quiet channels
- * @quiet_plcp_th: quiet PLCP threshold (channel is quiet if less than
- * this number of packets were received (typically 1)
- * @passive2active: is auto switching from passive to active during scan allowed
- * @rxchain_sel_flags: RXON_RX_CHAIN_*
- * @max_out_time: in usecs, max out of serving channel time
- * @suspend_time: how long to pause scan when returning to service channel:
- * bits 0-19: beacon interval in usecs (suspend before executing)
- * bits 20-23: reserved
- * bits 24-31: number of beacons (suspend between channels)
- * @rxon_flags: RXON_FLG_*
- * @filter_flags: RXON_FILTER_*
- * @tx_cmd: for active scans (zero for passive), w/o payload,
- * no RS so specify TX rate
- * @direct_scan: direct scan SSIDs
- * @type: one of IWM_SCAN_TYPE_*
- * @repeats: how many time to repeat the scan
- */
-struct iwm_scan_cmd {
+ * iwm_scan_schedule_lmac - schedule of scan offload
+ * @delay: delay between iterations, in seconds.
+ * @iterations: num of scan iterations
+ * @full_scan_mul: number of partial scans before each full scan
+ */
+struct iwm_scan_schedule_lmac {
+ uint16_t delay;
+ uint8_t iterations;
+ uint8_t full_scan_mul;
+} __packed; /* SCAN_SCHEDULE_API_S */
+
+/**
+ * iwm_scan_req_tx_cmd - SCAN_REQ_TX_CMD_API_S
+ * @tx_flags: combination of TX_CMD_FLG_*
+ * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
+ * cleared. Combination of RATE_MCS_*
+ * @sta_id: index of destination station in FW station table
+ * @reserved: for alignment and future use
+ */
+struct iwm_scan_req_tx_cmd {
+ uint32_t tx_flags;
+ uint32_t rate_n_flags;
+ uint8_t sta_id;
+ uint8_t reserved[3];
+} __packed;
+
+enum iwm_scan_channel_flags_lmac {
+ IWM_UNIFIED_SCAN_CHANNEL_FULL = (1 << 27),
+ IWM_UNIFIED_SCAN_CHANNEL_PARTIAL = (1 << 28),
+};
+
+/**
+ * iwm_scan_channel_cfg_lmac - SCAN_CHANNEL_CFG_S_VER2
+ * @flags: bits 1-20: directed scan to i'th ssid
+ * other bits &enum iwm_scan_channel_flags_lmac
+ * @channel_number: channel number 1-13 etc
+ * @iter_count: scan iteration on this channel
+ * @iter_interval: interval in seconds between iterations on one channel
+ */
+struct iwm_scan_channel_cfg_lmac {
+ uint32_t flags;
+ uint16_t channel_num;
+ uint16_t iter_count;
+ uint32_t iter_interval;
+} __packed;
+
+/*
+ * iwm_scan_probe_segment - PROBE_SEGMENT_API_S_VER_1
+ * @offset: offset in the data block
+ * @len: length of the segment
+ */
+struct iwm_scan_probe_segment {
+ uint16_t offset;
uint16_t len;
- uint8_t scan_flags;
- uint8_t channel_count;
- uint16_t quiet_time;
- uint16_t quiet_plcp_th;
- uint16_t passive2active;
- uint16_t rxchain_sel_flags;
+} __packed;
+
+/* iwm_scan_probe_req - PROBE_REQUEST_FRAME_API_S_VER_2
+ * @mac_header: first (and common) part of the probe
+ * @band_data: band specific data
+ * @common_data: last (and common) part of the probe
+ * @buf: raw data block
+ */
+struct iwm_scan_probe_req {
+ struct iwm_scan_probe_segment mac_header;
+ struct iwm_scan_probe_segment band_data[2];
+ struct iwm_scan_probe_segment common_data;
+ uint8_t buf[IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE];
+} __packed;
+
+enum iwm_scan_channel_flags {
+ IWM_SCAN_CHANNEL_FLAG_EBS = (1 << 0),
+ IWM_SCAN_CHANNEL_FLAG_EBS_ACCURATE = (1 << 1),
+ IWM_SCAN_CHANNEL_FLAG_CACHE_ADD = (1 << 2),
+};
+
+/* iwm_scan_channel_opt - CHANNEL_OPTIMIZATION_API_S
+ * @flags: enum iwm_scan_channel_flags
+ * @non_ebs_ratio: defines the ratio of number of scan iterations where EBS is
+ * involved.
+ * 1 - EBS is disabled.
+ * 2 - every second scan will be a full scan (and so on).
+ */
+struct iwm_scan_channel_opt {
+ uint16_t flags;
+ uint16_t non_ebs_ratio;
+} __packed;
+
+/**
+ * iwm_mvm_lmac_scan_flags
+ * @IWM_MVM_LMAC_SCAN_FLAG_PASS_ALL: pass all beacons and probe responses
+ * without filtering.
+ * @IWM_MVM_LMAC_SCAN_FLAG_PASSIVE: force passive scan on all channels
+ * @IWM_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION: single channel scan
+ * @IWM_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE: send iteration complete notification
+ * @IWM_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS: multiple SSID matching
+ * @IWM_MVM_LMAC_SCAN_FLAG_FRAGMENTED: all passive scans will be fragmented
+ * @IWM_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED: insert WFA vendor-specific TPC report
+ * and DS parameter set IEs into probe requests.
+ * @IWM_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL: use extended dwell time on channels
+ * 1, 6 and 11.
+ * @IWM_MVM_LMAC_SCAN_FLAG_MATCH: Send match found notification on matches
+ */
+enum iwm_mvm_lmac_scan_flags {
+ IWM_MVM_LMAC_SCAN_FLAG_PASS_ALL = (1 << 0),
+ IWM_MVM_LMAC_SCAN_FLAG_PASSIVE = (1 << 1),
+ IWM_MVM_LMAC_SCAN_FLAG_PRE_CONNECTION = (1 << 2),
+ IWM_MVM_LMAC_SCAN_FLAG_ITER_COMPLETE = (1 << 3),
+ IWM_MVM_LMAC_SCAN_FLAG_MULTIPLE_SSIDS = (1 << 4),
+ IWM_MVM_LMAC_SCAN_FLAG_FRAGMENTED = (1 << 5),
+ IWM_MVM_LMAC_SCAN_FLAGS_RRM_ENABLED = (1 << 6),
+ IWM_MVM_LMAC_SCAN_FLAG_EXTENDED_DWELL = (1 << 7),
+ IWM_MVM_LMAC_SCAN_FLAG_MATCH = (1 << 9),
+};
+
+enum iwm_scan_priority {
+ IWM_SCAN_PRIORITY_LOW,
+ IWM_SCAN_PRIORITY_MEDIUM,
+ IWM_SCAN_PRIORITY_HIGH,
+};
+
+/**
+ * iwm_scan_req_lmac - SCAN_REQUEST_CMD_API_S_VER_1
+ * @reserved1: for alignment and future use
+ * @n_channels: num of channels to scan
+ * @active_dwell: dwell time for active channels
+ * @passive_dwell: dwell time for passive channels
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @extended_dwell: dwell time for channels 1, 6 and 11 (in certain cases)
+ * @reserved2: for alignment and future use
+ * @rx_chain_select: PHY_RX_CHAIN_* flags
+ * @scan_flags: &enum iwm_mvm_lmac_scan_flags
+ * @max_out_time: max time (in TU) to be out of associated channel
+ * @suspend_time: pause scan this long (TUs) when returning to service channel
+ * @flags: RXON flags
+ * @filter_flags: RXON filter
+ * @tx_cmd: tx command for active scan; for 2GHz and for 5GHz
+ * @direct_scan: list of SSIDs for directed active scan
+ * @scan_prio: enum iwm_scan_priority
+ * @iter_num: number of scan iterations
+ * @delay: delay in seconds before first iteration
+ * @schedule: two scheduling plans. The first one is finite, the second one can
+ * be infinite.
+ * @channel_opt: channel optimization options, for full and partial scan
+ * @data: channel configuration and probe request packet.
+ */
+struct iwm_scan_req_lmac {
+ /* SCAN_REQUEST_FIXED_PART_API_S_VER_7 */
+ uint32_t reserved1;
+ uint8_t n_channels;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint8_t extended_dwell;
+ uint8_t reserved2;
+ uint16_t rx_chain_select;
+ uint32_t scan_flags;
uint32_t max_out_time;
uint32_t suspend_time;
- /* IWM_RX_ON_FLAGS_API_S_VER_1 */
- uint32_t rxon_flags;
+ /* RX_ON_FLAGS_API_S_VER_1 */
+ uint32_t flags;
uint32_t filter_flags;
- struct iwm_tx_cmd tx_cmd;
+ struct iwm_scan_req_tx_cmd tx_cmd[2];
struct iwm_ssid_ie direct_scan[IWM_PROBE_OPTION_MAX];
- uint32_t type;
- uint32_t repeats;
+ uint32_t scan_prio;
+ /* SCAN_REQ_PERIODIC_PARAMS_API_S */
+ uint32_t iter_num;
+ uint32_t delay;
+ struct iwm_scan_schedule_lmac schedule[IWM_MAX_SCHED_SCAN_PLANS];
+ struct iwm_scan_channel_opt channel_opt[2];
+ uint8_t data[];
+} __packed;
- /*
- * Probe request frame, followed by channel list.
- *
- * Size of probe request frame is specified by byte count in tx_cmd.
- * Channel list follows immediately after probe request frame.
- * Number of channels in list is specified by channel_count.
- * Each channel in list is of type:
- *
- * struct iwm_scan_channel channels[0];
- *
- * NOTE: Only one band of channels can be scanned per pass. You
- * must not mix 2.4GHz channels and 5.2GHz channels, and you must wait
- * for one scan to complete (i.e. receive IWM_SCAN_COMPLETE_NOTIFICATION)
- * before requesting another scan.
- */
- uint8_t data[0];
-} __packed; /* IWM_SCAN_REQUEST_FIXED_PART_API_S_VER_5 */
+/**
+ * iwm_periodic_scan_complete - PERIODIC_SCAN_COMPLETE_NTF_API_S_VER_2
+ * @last_schedule_line: last schedule line executed (fast or regular)
+ * @last_schedule_iteration: last scan iteration executed before scan abort
+ * @status: enum iwm_scan_offload_complete_status
+ * @ebs_status: EBS success status &enum iwm_scan_ebs_status
+ * @time_after_last_iter: time in seconds elapsed after last iteration
+ */
+struct iwm_periodic_scan_complete {
+ uint8_t last_schedule_line;
+ uint8_t last_schedule_iteration;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_after_last_iter;
+ uint32_t reserved;
+} __packed;
/* Response to scan request contains only status with one of these values */
#define IWM_SCAN_RESPONSE_OK 0x1
@@ -4653,22 +5196,6 @@ struct iwm_scan_complete_notif {
struct iwm_scan_results_notif results[IWM_MAX_NUM_SCAN_CHANNELS];
} __packed; /* IWM_SCAN_COMPLETE_NTF_API_S_VER_2 */
-/* scan offload */
-#define IWM_MAX_SCAN_CHANNELS 40
-#define IWM_SCAN_MAX_BLACKLIST_LEN 64
-#define IWM_SCAN_SHORT_BLACKLIST_LEN 16
-#define IWM_SCAN_MAX_PROFILES 11
-#define IWM_SCAN_OFFLOAD_PROBE_REQ_SIZE 512
-
-/* Default watchdog (in MS) for scheduled scan iteration */
-#define IWM_SCHED_SCAN_WATCHDOG cpu_to_le16(15000)
-
-#define IWM_GOOD_CRC_TH_DEFAULT cpu_to_le16(1)
-#define IWM_CAN_ABORT_STATUS 1
-
-#define IWM_FULL_SCAN_MULTIPLIER 5
-#define IWM_FAST_SCHED_SCAN_ITERATIONS 3
-
enum iwm_scan_framework_client {
IWM_SCAN_CLIENT_SCHED_SCAN = (1 << 0),
IWM_SCAN_CLIENT_NETDETECT = (1 << 1),
@@ -4864,6 +5391,28 @@ enum iwm_scan_offload_compleate_status {
};
/**
+ * struct iwm_lmac_scan_complete_notif - notifies end of scanning (all channels)
+ * SCAN_COMPLETE_NTF_API_S_VER_3
+ * @scanned_channels: number of channels scanned (and number of valid results)
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: an array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwm_lmac_scan_complete_notif {
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwm_scan_results_notif results[];
+} __packed;
+
+
+/**
* iwm_scan_offload_complete - IWM_SCAN_OFFLOAD_COMPLETE_NTF_API_S_VER_1
* @last_schedule_line: last schedule line executed (fast or regular)
* @last_schedule_iteration: last scan iteration executed before scan abort
@@ -4895,11 +5444,336 @@ struct iwm_sched_scan_results {
* BEGIN mvm/fw-api-sta.h
*/
+/* UMAC Scan API */
+
+/* Neither of these values may exceed 8, because we use an
+ * 8-bit mask (see IWM_MVM_SCAN_MASK).
+ */
+#define IWM_MVM_MAX_UMAC_SCANS 8
+#define IWM_MVM_MAX_LMAC_SCANS 1
+
+enum iwm_scan_config_flags {
+ IWM_SCAN_CONFIG_FLAG_ACTIVATE = (1 << 0),
+ IWM_SCAN_CONFIG_FLAG_DEACTIVATE = (1 << 1),
+ IWM_SCAN_CONFIG_FLAG_FORBID_CHUB_REQS = (1 << 2),
+ IWM_SCAN_CONFIG_FLAG_ALLOW_CHUB_REQS = (1 << 3),
+ IWM_SCAN_CONFIG_FLAG_SET_TX_CHAINS = (1 << 8),
+ IWM_SCAN_CONFIG_FLAG_SET_RX_CHAINS = (1 << 9),
+ IWM_SCAN_CONFIG_FLAG_SET_AUX_STA_ID = (1 << 10),
+ IWM_SCAN_CONFIG_FLAG_SET_ALL_TIMES = (1 << 11),
+ IWM_SCAN_CONFIG_FLAG_SET_EFFECTIVE_TIMES = (1 << 12),
+ IWM_SCAN_CONFIG_FLAG_SET_CHANNEL_FLAGS = (1 << 13),
+ IWM_SCAN_CONFIG_FLAG_SET_LEGACY_RATES = (1 << 14),
+ IWM_SCAN_CONFIG_FLAG_SET_MAC_ADDR = (1 << 15),
+ IWM_SCAN_CONFIG_FLAG_SET_FRAGMENTED = (1 << 16),
+ IWM_SCAN_CONFIG_FLAG_CLEAR_FRAGMENTED = (1 << 17),
+ IWM_SCAN_CONFIG_FLAG_SET_CAM_MODE = (1 << 18),
+ IWM_SCAN_CONFIG_FLAG_CLEAR_CAM_MODE = (1 << 19),
+ IWM_SCAN_CONFIG_FLAG_SET_PROMISC_MODE = (1 << 20),
+ IWM_SCAN_CONFIG_FLAG_CLEAR_PROMISC_MODE = (1 << 21),
+
+ /* Bits 26-31 are for num of channels in channel_array */
+#define IWM_SCAN_CONFIG_N_CHANNELS(n) ((n) << 26)
+};
+
+enum iwm_scan_config_rates {
+ /* OFDM basic rates */
+ IWM_SCAN_CONFIG_RATE_6M = (1 << 0),
+ IWM_SCAN_CONFIG_RATE_9M = (1 << 1),
+ IWM_SCAN_CONFIG_RATE_12M = (1 << 2),
+ IWM_SCAN_CONFIG_RATE_18M = (1 << 3),
+ IWM_SCAN_CONFIG_RATE_24M = (1 << 4),
+ IWM_SCAN_CONFIG_RATE_36M = (1 << 5),
+ IWM_SCAN_CONFIG_RATE_48M = (1 << 6),
+ IWM_SCAN_CONFIG_RATE_54M = (1 << 7),
+ /* CCK basic rates */
+ IWM_SCAN_CONFIG_RATE_1M = (1 << 8),
+ IWM_SCAN_CONFIG_RATE_2M = (1 << 9),
+ IWM_SCAN_CONFIG_RATE_5M = (1 << 10),
+ IWM_SCAN_CONFIG_RATE_11M = (1 << 11),
+
+ /* Bits 16-27 are for supported rates */
+#define IWM_SCAN_CONFIG_SUPPORTED_RATE(rate) ((rate) << 16)
+};
+
+enum iwm_channel_flags {
+ IWM_CHANNEL_FLAG_EBS = (1 << 0),
+ IWM_CHANNEL_FLAG_ACCURATE_EBS = (1 << 1),
+ IWM_CHANNEL_FLAG_EBS_ADD = (1 << 2),
+ IWM_CHANNEL_FLAG_PRE_SCAN_PASSIVE2ACTIVE = (1 << 3),
+};
+
+/**
+ * struct iwm_scan_config
+ * @flags: enum scan_config_flags
+ * @tx_chains: valid_tx antenna - ANT_* definitions
+ * @rx_chains: valid_rx antenna - ANT_* definitions
+ * @legacy_rates: default legacy rates - enum scan_config_rates
+ * @out_of_channel_time: default max out of serving channel time
+ * @suspend_time: default max suspend time
+ * @dwell_active: default dwell time for active scan
+ * @dwell_passive: default dwell time for passive scan
+ * @dwell_fragmented: default dwell time for fragmented scan
+ * @dwell_extended: default dwell time for channels 1, 6 and 11
+ * @mac_addr: default mac address to be used in probes
+ * @bcast_sta_id: the index of the station in the fw
+ * @channel_flags: default channel flags - enum iwm_channel_flags
+ * scan_config_channel_flag
+ * @channel_array: default supported channels
+ */
+struct iwm_scan_config {
+ uint32_t flags;
+ uint32_t tx_chains;
+ uint32_t rx_chains;
+ uint32_t legacy_rates;
+ uint32_t out_of_channel_time;
+ uint32_t suspend_time;
+ uint8_t dwell_active;
+ uint8_t dwell_passive;
+ uint8_t dwell_fragmented;
+ uint8_t dwell_extended;
+ uint8_t mac_addr[IEEE80211_ADDR_LEN];
+ uint8_t bcast_sta_id;
+ uint8_t channel_flags;
+ uint8_t channel_array[];
+} __packed; /* SCAN_CONFIG_DB_CMD_API_S */
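The command is variable-length: the struct is followed by one byte per channel in channel_array[], and the channel count is folded into flags via IWM_SCAN_CONFIG_N_CHANNELS(). A size/flags sketch under that assumption (allocation and wire-endianness handling elided; types as provided by the header's environment):

static size_t
example_scan_config_size(int nchan, uint32_t *flagsp)
{
	/* Encode the channel count into bits 26-31 of flags. */
	*flagsp = IWM_SCAN_CONFIG_FLAG_ACTIVATE |
	    IWM_SCAN_CONFIG_N_CHANNELS(nchan);
	return (sizeof(struct iwm_scan_config) + nchan);
}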
+
+/**
+ * iwm_umac_scan_flags
+ *@IWM_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
+ * can be preempted by other scan requests with higher priority.
+ * The low priority scan will be resumed when the higher priority scan is
+ * completed.
+ *@IWM_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
+ * when scan starts.
+ */
+enum iwm_umac_scan_flags {
+ IWM_UMAC_SCAN_FLAG_PREEMPTIVE = (1 << 0),
+ IWM_UMAC_SCAN_FLAG_START_NOTIF = (1 << 1),
+};
+
+enum iwm_umac_scan_uid_offsets {
+ IWM_UMAC_SCAN_UID_TYPE_OFFSET = 0,
+ IWM_UMAC_SCAN_UID_SEQ_OFFSET = 8,
+};
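Per the offsets above, a UMAC scan uid packs a type in bits 0-7 and a sequence number from bit 8 up. A hypothetical composer, just to make the layout concrete:

static uint32_t
example_make_scan_uid(uint32_t type, uint32_t seq)
{
	return ((type << IWM_UMAC_SCAN_UID_TYPE_OFFSET) |
	    (seq << IWM_UMAC_SCAN_UID_SEQ_OFFSET));
}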
+
+enum iwm_umac_scan_general_flags {
+ IWM_UMAC_SCAN_GEN_FLAGS_PERIODIC = (1 << 0),
+ IWM_UMAC_SCAN_GEN_FLAGS_OVER_BT = (1 << 1),
+ IWM_UMAC_SCAN_GEN_FLAGS_PASS_ALL = (1 << 2),
+ IWM_UMAC_SCAN_GEN_FLAGS_PASSIVE = (1 << 3),
+ IWM_UMAC_SCAN_GEN_FLAGS_PRE_CONNECT = (1 << 4),
+ IWM_UMAC_SCAN_GEN_FLAGS_ITER_COMPLETE = (1 << 5),
+ IWM_UMAC_SCAN_GEN_FLAGS_MULTIPLE_SSID = (1 << 6),
+ IWM_UMAC_SCAN_GEN_FLAGS_FRAGMENTED = (1 << 7),
+ IWM_UMAC_SCAN_GEN_FLAGS_RRM_ENABLED = (1 << 8),
+ IWM_UMAC_SCAN_GEN_FLAGS_MATCH = (1 << 9),
+ IWM_UMAC_SCAN_GEN_FLAGS_EXTENDED_DWELL = (1 << 10),
+};
+
+/**
+ * struct iwm_scan_channel_cfg_umac
+ * @flags: bitmap - 0-19: directed scan to i'th ssid.
+ * @channel_num: channel number 1-13 etc.
+ * @iter_count: repetition count for the channel.
+ * @iter_interval: interval between two scan iterations on one channel.
+ */
+struct iwm_scan_channel_cfg_umac {
+ uint32_t flags;
+ uint8_t channel_num;
+ uint8_t iter_count;
+ uint16_t iter_interval;
+} __packed; /* SCAN_CHANNEL_CFG_S_VER2 */
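A sketch of one channel entry that directs the scan at the first two SSIDs of the request (bits 0-19 of flags select SSIDs, per the comment above); the helper and values are illustrative:

static void
example_fill_umac_channel(struct iwm_scan_channel_cfg_umac *cfg,
    uint8_t chan)
{
	cfg->flags = (1 << 0) | (1 << 1);	/* match SSIDs 0 and 1 */
	cfg->channel_num = chan;
	cfg->iter_count = 1;			/* scan the channel once */
	cfg->iter_interval = 0;
}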
+
+/**
+ * struct iwm_scan_umac_schedule
+ * @interval: interval in seconds between scan iterations
+ * @iter_count: num of scan iterations for schedule plan, 0xff for infinite loop
+ * @reserved: for alignment and future use
+ */
+struct iwm_scan_umac_schedule {
+ uint16_t interval;
+ uint8_t iter_count;
+ uint8_t reserved;
+} __packed; /* SCAN_SCHED_PARAM_API_S_VER_1 */
+
+/**
+ * struct iwm_scan_req_umac_tail - the rest of the UMAC scan request command
+ * parameters following channels configuration array.
+ * @schedule: two scheduling plans.
+ * @delay: delay in TUs before starting the first scan iteration
+ * @reserved: for future use and alignment
+ * @preq: probe request with IEs blocks
+ * @direct_scan: list of SSIDs for directed active scan
+ */
+struct iwm_scan_req_umac_tail {
+ /* SCAN_PERIODIC_PARAMS_API_S_VER_1 */
+ struct iwm_scan_umac_schedule schedule[IWM_MAX_SCHED_SCAN_PLANS];
+ uint16_t delay;
+ uint16_t reserved;
+ /* SCAN_PROBE_PARAMS_API_S_VER_1 */
+ struct iwm_scan_probe_req preq;
+ struct iwm_ssid_ie direct_scan[IWM_PROBE_OPTION_MAX];
+} __packed;
+
+/**
+ * struct iwm_scan_req_umac
+ * @flags: &enum iwm_umac_scan_flags
+ * @uid: scan id, &enum iwm_umac_scan_uid_offsets
+ * @ooc_priority: out of channel priority - &enum iwm_scan_priority
+ * @general_flags: &enum iwm_umac_scan_general_flags
+ * @extended_dwell: dwell time for channels 1, 6 and 11
+ * @active_dwell: dwell time for active scan
+ * @passive_dwell: dwell time for passive scan
+ * @fragmented_dwell: dwell time for fragmented passive scan
+ * @max_out_time: max out of serving channel time
+ * @suspend_time: max suspend time
+ * @scan_priority: scan internal prioritization &enum iwm_scan_priority
+ * @channel_flags: &enum iwm_scan_channel_flags
+ * @n_channels: num of channels in scan request
+ * @reserved: for future use and alignment
+ * @data: &struct iwm_scan_channel_cfg_umac and
+ * &struct iwm_scan_req_umac_tail
+ */
+struct iwm_scan_req_umac {
+ uint32_t flags;
+ uint32_t uid;
+ uint32_t ooc_priority;
+ /* SCAN_GENERAL_PARAMS_API_S_VER_1 */
+ uint32_t general_flags;
+ uint8_t extended_dwell;
+ uint8_t active_dwell;
+ uint8_t passive_dwell;
+ uint8_t fragmented_dwell;
+ uint32_t max_out_time;
+ uint32_t suspend_time;
+ uint32_t scan_priority;
+ /* SCAN_CHANNEL_PARAMS_API_S_VER_1 */
+ uint8_t channel_flags;
+ uint8_t n_channels;
+ uint16_t reserved;
+ uint8_t data[];
+} __packed; /* SCAN_REQUEST_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwm_umac_scan_abort
+ * @uid: scan id, &enum iwm_umac_scan_uid_offsets
+ * @flags: reserved
+ */
+struct iwm_umac_scan_abort {
+ uint32_t uid;
+ uint32_t flags;
+} __packed; /* SCAN_ABORT_CMD_UMAC_API_S_VER_1 */
+
+/**
+ * struct iwm_umac_scan_complete
+ * @uid: scan id, &enum iwm_umac_scan_uid_offsets
+ * @last_schedule: last scheduling line
+ * @last_iter: last scan iteration number
+ * @status: scan status, &enum iwm_scan_offload_complete_status
+ * @ebs_status: &enum iwm_scan_ebs_status
+ * @time_from_last_iter: time elapsed from last iteration
+ * @reserved: for future use
+ */
+struct iwm_umac_scan_complete {
+ uint32_t uid;
+ uint8_t last_schedule;
+ uint8_t last_iter;
+ uint8_t status;
+ uint8_t ebs_status;
+ uint32_t time_from_last_iter;
+ uint32_t reserved;
+} __packed; /* SCAN_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+#define IWM_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN 5
+/**
+ * struct iwm_scan_offload_profile_match - match information
+ * @bssid: matched bssid
+ * @channel: channel where the match occurred
+ * @energy:
+ * @matching_feature:
+ * @matching_channels: bitmap of channels that matched, referencing
+ * the channels passed in the scan offload request
+ */
+struct iwm_scan_offload_profile_match {
+ uint8_t bssid[IEEE80211_ADDR_LEN];
+ uint16_t reserved;
+ uint8_t channel;
+ uint8_t energy;
+ uint8_t matching_feature;
+ uint8_t matching_channels[IWM_SCAN_OFFLOAD_MATCHING_CHANNELS_LEN];
+} __packed; /* SCAN_OFFLOAD_PROFILE_MATCH_RESULTS_S_VER_1 */
+
+/**
+ * struct iwm_scan_offload_profiles_query - match results query response
+ * @matched_profiles: bitmap of matched profiles, referencing the
+ * matches passed in the scan offload request
+ * @last_scan_age: age of the last offloaded scan
+ * @n_scans_done: number of offloaded scans done
+ * @gp2_d0u: GP2 when D0U occurred
+ * @gp2_invoked: GP2 when scan offload was invoked
+ * @resume_while_scanning: not used
+ * @self_recovery: obsolete
+ * @reserved: reserved
+ * @matches: array of match information, one for each match
+ */
+struct iwm_scan_offload_profiles_query {
+ uint32_t matched_profiles;
+ uint32_t last_scan_age;
+ uint32_t n_scans_done;
+ uint32_t gp2_d0u;
+ uint32_t gp2_invoked;
+ uint8_t resume_while_scanning;
+ uint8_t self_recovery;
+ uint16_t reserved;
+ struct iwm_scan_offload_profile_match matches[IWM_SCAN_MAX_PROFILES];
+} __packed; /* SCAN_OFFLOAD_PROFILES_QUERY_RSP_S_VER_2 */
+
+/**
+ * struct iwm_umac_scan_iter_complete_notif - notifies end of scanning iteration
+ * @uid: scan id, &enum iwm_umac_scan_uid_offsets
+ * @scanned_channels: number of channels scanned and number of valid elements in
+ * results array
+ * @status: one of SCAN_COMP_STATUS_*
+ * @bt_status: BT on/off status
+ * @last_channel: last channel that was scanned
+ * @tsf_low: TSF timer (lower half) in usecs
+ * @tsf_high: TSF timer (higher half) in usecs
+ * @results: array of scan results, only "scanned_channels" of them are valid
+ */
+struct iwm_umac_scan_iter_complete_notif {
+ uint32_t uid;
+ uint8_t scanned_channels;
+ uint8_t status;
+ uint8_t bt_status;
+ uint8_t last_channel;
+ uint32_t tsf_low;
+ uint32_t tsf_high;
+ struct iwm_scan_results_notif results[];
+} __packed; /* SCAN_ITER_COMPLETE_NTF_UMAC_API_S_VER_1 */
+
+/* Please keep this enum *SORTED* by hex value.
+ * Needed for binary search, otherwise a warning will be triggered.
+ */
+enum iwm_scan_subcmd_ids {
+ IWM_GSCAN_START_CMD = 0x0,
+ IWM_GSCAN_STOP_CMD = 0x1,
+ IWM_GSCAN_SET_HOTLIST_CMD = 0x2,
+ IWM_GSCAN_RESET_HOTLIST_CMD = 0x3,
+ IWM_GSCAN_SET_SIGNIFICANT_CHANGE_CMD = 0x4,
+ IWM_GSCAN_RESET_SIGNIFICANT_CHANGE_CMD = 0x5,
+ IWM_GSCAN_SIGNIFICANT_CHANGE_EVENT = 0xFD,
+ IWM_GSCAN_HOTLIST_CHANGE_EVENT = 0xFE,
+ IWM_GSCAN_RESULTS_AVAILABLE_EVENT = 0xFF,
+};
+
+/* STA API */
+
/**
* enum iwm_sta_flags - flags for the ADD_STA host command
* @IWM_STA_FLG_REDUCED_TX_PWR_CTRL:
* @IWM_STA_FLG_REDUCED_TX_PWR_DATA:
- * @IWM_STA_FLG_FLG_ANT_MSK: Antenna selection
+ * @IWM_STA_FLG_DISABLE_TX: set if TX should be disabled
* @IWM_STA_FLG_PS: set if STA is in Power Save
* @IWM_STA_FLG_INVALID: set if STA is invalid
* @IWM_STA_FLG_DLP_EN: Direct Link Protocol is enabled
@@ -4923,10 +5797,7 @@ enum iwm_sta_flags {
IWM_STA_FLG_REDUCED_TX_PWR_CTRL = (1 << 3),
IWM_STA_FLG_REDUCED_TX_PWR_DATA = (1 << 6),
- IWM_STA_FLG_FLG_ANT_A = (1 << 4),
- IWM_STA_FLG_FLG_ANT_B = (2 << 4),
- IWM_STA_FLG_FLG_ANT_MSK = (IWM_STA_FLG_FLG_ANT_A |
- IWM_STA_FLG_FLG_ANT_B),
+ IWM_STA_FLG_DISABLE_TX = (1 << 4),
IWM_STA_FLG_PS = (1 << 8),
IWM_STA_FLG_DRAIN_FLOW = (1 << 12),
@@ -5004,7 +5875,7 @@ enum iwm_sta_key_flag {
/**
* enum iwm_sta_modify_flag - indicate to the fw what flag are being changed
- * @IWM_STA_MODIFY_KEY: this command modifies %key
+ * @IWM_STA_MODIFY_QUEUE_REMOVAL: this command removes a queue
* @IWM_STA_MODIFY_TID_DISABLE_TX: this command modifies %tid_disable_tx
* @IWM_STA_MODIFY_TX_RATE: unused
* @IWM_STA_MODIFY_ADD_BA_TID: this command modifies %add_immediate_ba_tid
@@ -5014,7 +5885,7 @@ enum iwm_sta_key_flag {
* @IWM_STA_MODIFY_QUEUES: modify the queues used by this station
*/
enum iwm_sta_modify_flag {
- IWM_STA_MODIFY_KEY = (1 << 0),
+ IWM_STA_MODIFY_QUEUE_REMOVAL = (1 << 0),
IWM_STA_MODIFY_TID_DISABLE_TX = (1 << 1),
IWM_STA_MODIFY_TX_RATE = (1 << 2),
IWM_STA_MODIFY_ADD_BA_TID = (1 << 3),
@@ -5031,11 +5902,14 @@ enum iwm_sta_modify_flag {
* @IWM_STA_SLEEP_STATE_AWAKE:
* @IWM_STA_SLEEP_STATE_PS_POLL:
* @IWM_STA_SLEEP_STATE_UAPSD:
+ * @IWM_STA_SLEEP_STATE_MOREDATA: set more-data bit on
+ * (last) released frame
*/
enum iwm_sta_sleep_flag {
IWM_STA_SLEEP_STATE_AWAKE = 0,
IWM_STA_SLEEP_STATE_PS_POLL = (1 << 0),
IWM_STA_SLEEP_STATE_UAPSD = (1 << 1),
+ IWM_STA_SLEEP_STATE_MOREDATA = (1 << 2),
};
/* STA ID and color bits definitions */
@@ -5083,23 +5957,25 @@ struct iwm_mvm_keyinfo {
uint64_t hw_tkip_mic_tx_key;
} __packed;
+#define IWM_ADD_STA_STATUS_MASK 0xFF
+#define IWM_ADD_STA_BAID_VALID_MASK 0x8000
+#define IWM_ADD_STA_BAID_MASK 0x7F00
+#define IWM_ADD_STA_BAID_SHIFT 8
+
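A hedged sketch of decoding these fields from an ADD_STA response status word; the helper name is ours, and the driver's own handling may differ:

static int
example_get_baid(uint32_t status, uint8_t *baid)
{
	if ((status & IWM_ADD_STA_BAID_VALID_MASK) == 0)
		return (-1);	/* no block-ack id in this response */
	*baid = (status & IWM_ADD_STA_BAID_MASK) >> IWM_ADD_STA_BAID_SHIFT;
	return (0);
}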
/**
- * struct iwm_mvm_add_sta_cmd_v5 - Add/modify a station in the fw's sta table.
- * ( IWM_REPLY_ADD_STA = 0x18 )
+ * struct iwm_mvm_add_sta_cmd_v7 - Add/modify a station in the fw's sta table.
+ * ( REPLY_ADD_STA = 0x18 )
* @add_modify: 1: modify existing, 0: add new station
- * @unicast_tx_key_id: unicast tx key id. Relevant only when unicast key sent
- * @multicast_tx_key_id: multicast tx key id. Relevant only when multicast key
- * sent
+ * @awake_acs:
+ * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
+ * AMPDU for tid x. Set %IWM_STA_MODIFY_TID_DISABLE_TX to change this field.
* @mac_id_n_color: the Mac context this station belongs to
* @addr[IEEE80211_ADDR_LEN]: station's MAC address
* @sta_id: index of station in uCode's station table
* @modify_mask: IWM_STA_MODIFY_*, selects which parameters to modify vs. leave
* alone. 1 - modify, 0 - don't change.
- * @key: look at %iwm_mvm_keyinfo
* @station_flags: look at %iwm_sta_flags
* @station_flags_msk: what of %station_flags have changed
- * @tid_disable_tx: is tid BIT(tid) enabled for Tx. Clear BIT(x) to enable
- * AMPDU for tid x. Set %IWM_STA_MODIFY_TID_DISABLE_TX to change this field.
* @add_immediate_ba_tid: tid for which to add block-ack support (Rx)
* Set %IWM_STA_MODIFY_ADD_BA_TID to use this field, and also set
* add_immediate_ba_ssn.
@@ -5123,40 +5999,9 @@ struct iwm_mvm_keyinfo {
* ADD_STA sets up the table entry for one station, either creating a new
* entry, or modifying a pre-existing one.
*/
-struct iwm_mvm_add_sta_cmd_v5 {
+struct iwm_mvm_add_sta_cmd_v7 {
uint8_t add_modify;
- uint8_t unicast_tx_key_id;
- uint8_t multicast_tx_key_id;
- uint8_t reserved1;
- uint32_t mac_id_n_color;
- uint8_t addr[IEEE80211_ADDR_LEN];
- uint16_t reserved2;
- uint8_t sta_id;
- uint8_t modify_mask;
- uint16_t reserved3;
- struct iwm_mvm_keyinfo key;
- uint32_t station_flags;
- uint32_t station_flags_msk;
- uint16_t tid_disable_tx;
- uint16_t reserved4;
- uint8_t add_immediate_ba_tid;
- uint8_t remove_immediate_ba_tid;
- uint16_t add_immediate_ba_ssn;
- uint16_t sleep_tx_count;
- uint16_t sleep_state_flags;
- uint16_t assoc_id;
- uint16_t beamform_flags;
- uint32_t tfd_queue_msk;
-} __packed; /* IWM_ADD_STA_CMD_API_S_VER_5 */
-
-/**
- * struct iwm_mvm_add_sta_cmd_v6 - Add / modify a station
- * VER_6 of this command is quite similar to VER_5 except
- * exclusion of all fields related to the security key installation.
- */
-struct iwm_mvm_add_sta_cmd_v6 {
- uint8_t add_modify;
- uint8_t reserved1;
+ uint8_t awake_acs;
uint16_t tid_disable_tx;
uint32_t mac_id_n_color;
uint8_t addr[IEEE80211_ADDR_LEN]; /* _STA_ID_MODIFY_INFO_API_S_VER_1 */
@@ -5174,7 +6019,7 @@ struct iwm_mvm_add_sta_cmd_v6 {
uint16_t assoc_id;
uint16_t beamform_flags;
uint32_t tfd_queue_msk;
-} __packed; /* IWM_ADD_STA_CMD_API_S_VER_6 */
+} __packed; /* ADD_STA_CMD_API_S_VER_7 */
/**
* struct iwm_mvm_add_sta_key_cmd - add/modify sta key
@@ -5264,17 +6109,228 @@ struct iwm_mvm_wep_key_cmd {
struct iwm_mvm_wep_key wep_key[0];
} __packed; /* SEC_CURR_WEP_KEY_CMD_API_S_VER_2 */
-
/*
* END mvm/fw-api-sta.h
*/
/*
+ * BT coex
+ */
+
+enum iwm_bt_coex_mode {
+ IWM_BT_COEX_DISABLE = 0x0,
+ IWM_BT_COEX_NW = 0x1,
+ IWM_BT_COEX_BT = 0x2,
+ IWM_BT_COEX_WIFI = 0x3,
+}; /* BT_COEX_MODES_E */
+
+enum iwm_bt_coex_enabled_modules {
+ IWM_BT_COEX_MPLUT_ENABLED = (1 << 0),
+ IWM_BT_COEX_MPLUT_BOOST_ENABLED = (1 << 1),
+ IWM_BT_COEX_SYNC2SCO_ENABLED = (1 << 2),
+ IWM_BT_COEX_CORUN_ENABLED = (1 << 3),
+ IWM_BT_COEX_HIGH_BAND_RET = (1 << 4),
+}; /* BT_COEX_MODULES_ENABLE_E_VER_1 */
+
+/**
+ * struct iwm_bt_coex_cmd - bt coex configuration command
+ * @mode: enum %iwm_bt_coex_mode
+ * @enabled_modules: enum %iwm_bt_coex_enabled_modules
+ *
+ * The structure is used for the BT_COEX command.
+ */
+struct iwm_bt_coex_cmd {
+ uint32_t mode;
+ uint32_t enabled_modules;
+} __packed; /* BT_COEX_CMD_API_S_VER_6 */
+
+
+/*
+ * Location Aware Regulatory (LAR) API - MCC updates
+ */
+
+/**
+ * struct iwm_mcc_update_cmd_v1 - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwm_mcc_source
+ * @reserved: reserved for alignment
+ */
+struct iwm_mcc_update_cmd_v1 {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved;
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_1 */
+
+/**
+ * struct iwm_mcc_update_cmd - Request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: the source from where we got the MCC, see iwm_mcc_source
+ * @reserved: reserved for alignment
+ * @key: integrity key for MCC API OEM testing
+ * @reserved2: reserved
+ */
+struct iwm_mcc_update_cmd {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved;
+ uint32_t key;
+ uint32_t reserved2[5];
+} __packed; /* LAR_UPDATE_MCC_CMD_API_S_VER_2 */
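A minimal sketch of requesting a switch to the NVM default profile via the 'ZZ' code described above. Packing the two ASCII letters into the 16-bit mcc field as (alpha2[0] << 8) | alpha2[1] is an assumption stated here, and wire-endianness conversion is elided:

static void
example_fill_mcc_reset(struct iwm_mcc_update_cmd *cmd)
{
	memset(cmd, 0, sizeof(*cmd));
	cmd->mcc = ('Z' << 8) | 'Z';	/* switch to NVM default profile */
	cmd->source_id = IWM_MCC_SOURCE_WIFI;
}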
+
+/**
+ * iwm_mcc_update_resp_v1 - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwm_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @source_id: the MCC source, see iwm_mcc_source
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ * channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16bits are used.
+ */
+struct iwm_mcc_update_resp_v1 {
+ uint32_t status;
+ uint16_t mcc;
+ uint8_t cap;
+ uint8_t source_id;
+ uint32_t n_channels;
+ uint32_t channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_1 */
+
+/**
+ * iwm_mcc_update_resp - response to MCC_UPDATE_CMD.
+ * Contains the new channel control profile map, if changed, and the new MCC
+ * (mobile country code).
+ * The new MCC may be different than what was requested in MCC_UPDATE_CMD.
+ * @status: see &enum iwm_mcc_update_status
+ * @mcc: the new applied MCC
+ * @cap: capabilities for all channels which match the MCC
+ * @source_id: the MCC source, see iwm_mcc_source
+ * @time: time elapsed from the MCC test start (in units of 30 seconds)
+ * @reserved: reserved.
+ * @n_channels: number of channels in @channels_data (may be 14, 39, 50 or 51
+ * channels, depending on platform)
+ * @channels: channel control data map, DWORD for each channel. Only the first
+ * 16 bits are used.
+ */
+struct iwm_mcc_update_resp {
+ uint32_t status;
+ uint16_t mcc;
+ uint8_t cap;
+ uint8_t source_id;
+ uint16_t time;
+ uint16_t reserved;
+ uint32_t n_channels;
+ uint32_t channels[0];
+} __packed; /* LAR_UPDATE_MCC_CMD_RESP_S_VER_2 */
+
+/**
+ * struct iwm_mcc_chub_notif - chub notifies of mcc change
+ * (MCC_CHUB_UPDATE_CMD = 0xc9)
+ * The Chub (Communication Hub, CommsHUB) is a HW component that connects to
+ * the cellular and connectivity cores, gets updates of the MCC, and
+ * notifies the ucode directly of any MCC change.
+ * The ucode requests the driver to request the device to update geographic
+ * regulatory profile according to the given MCC (Mobile Country Code).
+ * The MCC is two letter-code, ascii upper case[A-Z] or '00' for world domain.
+ * 'ZZ' MCC will be used to switch to NVM default profile; in this case, the
+ * MCC in the cmd response will be the relevant MCC in the NVM.
+ * @mcc: given mobile country code
+ * @source_id: identity of the change originator, see iwm_mcc_source
+ * @reserved1: reserved for alignment
+ */
+struct iwm_mcc_chub_notif {
+ uint16_t mcc;
+ uint8_t source_id;
+ uint8_t reserved1;
+} __packed; /* LAR_MCC_NOTIFY_S */
+
+enum iwm_mcc_update_status {
+ IWM_MCC_RESP_NEW_CHAN_PROFILE,
+ IWM_MCC_RESP_SAME_CHAN_PROFILE,
+ IWM_MCC_RESP_INVALID,
+ IWM_MCC_RESP_NVM_DISABLED,
+ IWM_MCC_RESP_ILLEGAL,
+ IWM_MCC_RESP_LOW_PRIORITY,
+ IWM_MCC_RESP_TEST_MODE_ACTIVE,
+ IWM_MCC_RESP_TEST_MODE_NOT_ACTIVE,
+ IWM_MCC_RESP_TEST_MODE_DENIAL_OF_SERVICE,
+};
+
+enum iwm_mcc_source {
+ IWM_MCC_SOURCE_OLD_FW = 0,
+ IWM_MCC_SOURCE_ME = 1,
+ IWM_MCC_SOURCE_BIOS = 2,
+ IWM_MCC_SOURCE_3G_LTE_HOST = 3,
+ IWM_MCC_SOURCE_3G_LTE_DEVICE = 4,
+ IWM_MCC_SOURCE_WIFI = 5,
+ IWM_MCC_SOURCE_RESERVED = 6,
+ IWM_MCC_SOURCE_DEFAULT = 7,
+ IWM_MCC_SOURCE_UNINITIALIZED = 8,
+ IWM_MCC_SOURCE_MCC_API = 9,
+ IWM_MCC_SOURCE_GET_CURRENT = 0x10,
+ IWM_MCC_SOURCE_GETTING_MCC_TEST_MODE = 0x11,
+};
+
+/*
* Some cherry-picked definitions
*/
#define IWM_FRAME_LIMIT 64
+/*
+ * From Linux commit ab02165ccec4c78162501acedeef1a768acdb811:
+ * As the firmware is slowly running out of command IDs and grouping of
+ * commands is desirable anyway, the firmware is extending the command
+ * header from 4 bytes to 8 bytes to introduce a group (in place of the
+ * former flags field, since that's always 0 on commands and thus can
+ * be easily used to distinguish between the two).
+ *
+ * These functions retrieve specific information from the id field in
+ * the iwm_host_cmd struct which contains the command id, the group id,
+ * and the version of the command.
+ */
+static inline uint8_t
+iwm_cmd_opcode(uint32_t cmdid)
+{
+ return cmdid & 0xff;
+}
+
+static inline uint8_t
+iwm_cmd_groupid(uint32_t cmdid)
+{
+ return ((cmdid & 0xff00) >> 8);
+}
+
+static inline uint8_t
+iwm_cmd_version(uint32_t cmdid)
+{
+ return ((cmdid & 0xff0000) >> 16);
+}
+
+static inline uint32_t
+iwm_cmd_id(uint8_t opcode, uint8_t groupid, uint8_t version)
+{
+ return opcode + (groupid << 8) + (version << 16);
+}
+
+/* make uint16_t wide id out of uint8_t group and opcode */
+#define IWM_WIDE_ID(grp, opcode) (((grp) << 8) | (opcode))
+
+/* due to the conversion, this group is special */
+#define IWM_ALWAYS_LONG_GROUP 1
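A round-trip sketch with the helpers above, using IWM_ADD_STA (0x18) from the command enum earlier in this header; the other values are arbitrary:

static int
example_cmd_id_roundtrip(void)
{
	uint32_t id = iwm_cmd_id(0x18 /* IWM_ADD_STA */,
	    IWM_ALWAYS_LONG_GROUP, 0);

	/* Decomposition recovers exactly what was composed. */
	return (iwm_cmd_opcode(id) == 0x18 &&
	    iwm_cmd_groupid(id) == IWM_ALWAYS_LONG_GROUP &&
	    iwm_cmd_version(id) == 0);
}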
+
struct iwm_cmd_header {
uint8_t code;
uint8_t flags;
@@ -5282,6 +6338,16 @@ struct iwm_cmd_header {
uint8_t qid;
} __packed;
+struct iwm_cmd_header_wide {
+ uint8_t opcode;
+ uint8_t group_id;
+ uint8_t idx;
+ uint8_t qid;
+ uint16_t length;
+ uint8_t reserved;
+ uint8_t version;
+} __packed;
+
enum iwm_power_scheme {
IWM_POWER_SCHEME_CAM = 1,
IWM_POWER_SCHEME_BPS,
@@ -5292,10 +6358,26 @@ enum iwm_power_scheme {
#define IWM_MAX_CMD_PAYLOAD_SIZE ((4096 - 4) - sizeof(struct iwm_cmd_header))
#define IWM_CMD_FAILED_MSK 0x40
+/**
+ * struct iwm_device_cmd
+ *
+ * For allocation of the command and tx queues, this establishes the overall
+ * size of the largest command we send to uCode, except for commands that
+ * aren't fully copied and use other TFD space.
+ */
struct iwm_device_cmd {
- struct iwm_cmd_header hdr;
-
- uint8_t data[IWM_DEF_CMD_PAYLOAD_SIZE];
+ union {
+ struct {
+ struct iwm_cmd_header hdr;
+ uint8_t data[IWM_DEF_CMD_PAYLOAD_SIZE];
+ };
+ struct {
+ struct iwm_cmd_header_wide hdr_wide;
+ uint8_t data_wide[IWM_DEF_CMD_PAYLOAD_SIZE -
+ sizeof(struct iwm_cmd_header_wide) +
+ sizeof(struct iwm_cmd_header)];
+ };
+ };
} __packed;
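Since the wide arm trims its payload by exactly the header growth (8 bytes of wide header vs. 4 of the old one), both union arms describe the same fixed-size buffer. A compile-time sanity sketch using the kernel's CTASSERT from <sys/systm.h>:

/* Both layouts of iwm_device_cmd occupy the same number of bytes. */
CTASSERT(sizeof(struct iwm_device_cmd) ==
    sizeof(struct iwm_cmd_header) + IWM_DEF_CMD_PAYLOAD_SIZE);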
struct iwm_rx_packet {
@@ -5357,11 +6439,4 @@ iwm_rx_packet_payload_len(const struct iwm_rx_packet *pkt)
bus_space_barrier((sc)->sc_st, (sc)->sc_sh, 0, (sc)->sc_sz, \
BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
-#define IWM_FW_VALID_TX_ANT(sc) \
- ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_TX_CHAIN) \
- >> IWM_FW_PHY_CFG_TX_CHAIN_POS)
-#define IWM_FW_VALID_RX_ANT(sc) \
- ((sc->sc_fw_phy_config & IWM_FW_PHY_CFG_RX_CHAIN) \
- >> IWM_FW_PHY_CFG_RX_CHAIN_POS)
-
#endif /* __IF_IWM_REG_H__ */
diff --git a/sys/dev/iwm/if_iwmvar.h b/sys/dev/iwm/if_iwmvar.h
index e923aa4f70ee..15a4655c03db 100644
--- a/sys/dev/iwm/if_iwmvar.h
+++ b/sys/dev/iwm/if_iwmvar.h
@@ -137,8 +137,9 @@ struct iwm_tx_radiotap_header {
(1 << IEEE80211_RADIOTAP_CHANNEL))
-#define IWM_UCODE_SECT_MAX 6
+#define IWM_UCODE_SECT_MAX 16
#define IWM_FWDMASEGSZ (192*1024)
+#define IWM_FWDMASEGSZ_8000 (320*1024)
/* sanity check value */
#define IWM_FWMAXSIZE (2*1024*1024)
@@ -152,9 +153,10 @@ struct iwm_tx_radiotap_header {
#define IWM_FW_STATUS_DONE 2
enum iwm_ucode_type {
- IWM_UCODE_TYPE_INIT,
IWM_UCODE_TYPE_REGULAR,
+ IWM_UCODE_TYPE_INIT,
IWM_UCODE_TYPE_WOW,
+ IWM_UCODE_TYPE_REGULAR_USNIFFER,
IWM_UCODE_TYPE_MAX
};
@@ -197,8 +199,9 @@ struct iwm_nvm_data {
uint8_t radio_cfg_pnum;
uint8_t valid_tx_ant, valid_rx_ant;
#define IWM_NUM_CHANNELS 39
+#define IWM_NUM_CHANNELS_8000 51
- uint16_t nvm_ch_flags[IWM_NUM_CHANNELS];
+ uint16_t nvm_ch_flags[IWM_NUM_CHANNELS_8000];
uint16_t nvm_version;
uint8_t max_tx_pwr_half_dbm;
@@ -216,9 +219,9 @@ struct iwm_host_cmd {
int handler_status;
uint32_t flags;
+ uint32_t id;
uint16_t len[IWM_MAX_CMD_TBS_PER_TFD];
uint8_t dataflags[IWM_MAX_CMD_TBS_PER_TFD];
- uint8_t id;
};
/*
@@ -269,13 +272,6 @@ struct iwm_tx_ring {
#define IWM_MAX_SCATTER 20
-struct iwm_softc;
-struct iwm_rbuf {
- struct iwm_softc *sc;
- void *vaddr;
- bus_addr_t paddr;
-};
-
struct iwm_rx_data {
struct mbuf *m;
bus_dmamap_t map;
@@ -296,6 +292,7 @@ struct iwm_rx_ring {
struct iwm_ucode_status {
uint32_t uc_error_event_table;
+ uint32_t uc_umac_error_event_table;
uint32_t uc_log_event_table;
int uc_ok;
@@ -304,7 +301,9 @@ struct iwm_ucode_status {
#define IWM_CMD_RESP_MAX PAGE_SIZE
-#define IWM_OTP_LOW_IMAGE_SIZE 2048
+/* lower blocks contain EEPROM image and calibration data */
+#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_7000 16384
+#define IWM_OTP_LOW_IMAGE_SIZE_FAMILY_8000 32768
#define IWM_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS 500
#define IWM_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS 400
@@ -329,7 +328,7 @@ enum iwm_hcmd_dataflag {
* iwlwifi/iwl-phy-db
*/
-#define IWM_NUM_PAPD_CH_GROUPS 4
+#define IWM_NUM_PAPD_CH_GROUPS 9
#define IWM_NUM_TXP_CH_GROUPS 9
struct iwm_phy_db_entry {
@@ -386,6 +385,7 @@ struct iwm_node {
#define IWM_NODE(_ni) ((struct iwm_node *)(_ni))
#define IWM_STATION_ID 0
+#define IWM_AUX_STA_ID 1
#define IWM_DEFAULT_MACID 0
#define IWM_DEFAULT_COLOR 0
@@ -409,7 +409,7 @@ struct iwm_softc {
#define IWM_FLAG_STOPPED (1 << 2)
#define IWM_FLAG_RFKILL (1 << 3)
#define IWM_FLAG_BUSY (1 << 4)
-#define IWM_FLAG_DORESUME (1 << 5)
+#define IWM_FLAG_SCANNING (1 << 5)
struct intr_config_hook sc_preinit_hook;
struct callout sc_watchdog_to;
@@ -441,7 +441,14 @@ struct iwm_softc {
int ict_cur;
int sc_hw_rev;
+#define IWM_SILICON_A_STEP 0
+#define IWM_SILICON_B_STEP 1
+#define IWM_SILICON_C_STEP 2
+#define IWM_SILICON_D_STEP 3
int sc_hw_id;
+ int sc_device_family;
+#define IWM_DEVICE_FAMILY_7000 1
+#define IWM_DEVICE_FAMILY_8000 2
struct iwm_dma_info kw_dma;
struct iwm_dma_info fw_dma;
@@ -451,10 +458,14 @@ struct iwm_softc {
struct iwm_ucode_status sc_uc;
enum iwm_ucode_type sc_uc_current;
- int sc_fwver;
+ char sc_fwver[32];
int sc_capaflags;
int sc_capa_max_probe_len;
+ int sc_capa_n_scan_channels;
+ uint32_t sc_ucode_api;
+ uint8_t sc_enabled_capa[howmany(IWM_NUM_UCODE_TLV_CAPA, NBBY)];
+ char sc_fw_mcc[3];
int sc_intmask;
@@ -482,10 +493,7 @@ struct iwm_softc {
int sc_tx_timer;
- struct iwm_scan_cmd *sc_scan_cmd;
- size_t sc_scan_cmd_len;
int sc_scan_last_antenna;
- int sc_scanband;
int sc_fixed_ridx;
@@ -495,7 +503,6 @@ struct iwm_softc {
uint8_t sc_cmd_resp[IWM_CMD_RESP_MAX];
int sc_wantresp;
- struct taskqueue *sc_tq;
struct task sc_es_task;
struct iwm_rx_phy_info sc_last_phy_info;
diff --git a/sys/dev/ixgb/if_ixgb.h b/sys/dev/ixgb/if_ixgb.h
index e42dc1deca75..d7044a780f81 100644
--- a/sys/dev/ixgb/if_ixgb.h
+++ b/sys/dev/ixgb/if_ixgb.h
@@ -277,7 +277,7 @@ struct adapter {
/* FreeBSD operating-system-specific structures */
struct ixgb_osdep osdep;
- struct device *dev;
+ device_t dev;
struct resource *res_memory;
struct resource *res_ioport;
struct resource *res_interrupt;
diff --git a/sys/dev/ixgb/if_ixgb_osdep.h b/sys/dev/ixgb/if_ixgb_osdep.h
index e8ce885525bb..33b550a71f34 100644
--- a/sys/dev/ixgb/if_ixgb_osdep.h
+++ b/sys/dev/ixgb/if_ixgb_osdep.h
@@ -90,7 +90,7 @@ struct ixgb_osdep
{
bus_space_tag_t mem_bus_space_tag;
bus_space_handle_t mem_bus_space_handle;
- struct device *dev;
+ device_t dev;
};
#define IXGB_WRITE_FLUSH(a) IXGB_READ_REG(a, STATUS)
diff --git a/sys/dev/ixgbe/ixgbe.h b/sys/dev/ixgbe/ixgbe.h
index 6a1522f82564..68f2edb57eef 100644
--- a/sys/dev/ixgbe/ixgbe.h
+++ b/sys/dev/ixgbe/ixgbe.h
@@ -458,7 +458,7 @@ struct adapter {
struct ixgbe_hw hw;
struct ixgbe_osdep osdep;
- struct device *dev;
+ device_t dev;
struct ifnet *ifp;
struct resource *pci_mem;
diff --git a/sys/dev/ixl/i40e_adminq.c b/sys/dev/ixl/i40e_adminq.c
index 792f832ac70f..cfffc1f01909 100644
--- a/sys/dev/ixl/i40e_adminq.c
+++ b/sys/dev/ixl/i40e_adminq.c
@@ -39,16 +39,6 @@
#include "i40e_prototype.h"
/**
- * i40e_is_nvm_update_op - return TRUE if this is an NVM update operation
- * @desc: API request descriptor
- **/
-static INLINE bool i40e_is_nvm_update_op(struct i40e_aq_desc *desc)
-{
- return (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_erase)) ||
- (desc->opcode == CPU_TO_LE16(i40e_aqc_opc_nvm_update));
-}
-
-/**
* i40e_adminq_init_regs - Initialize AdminQ registers
* @hw: pointer to the hardware structure
*
@@ -661,13 +651,9 @@ enum i40e_status_code i40e_init_adminq(struct i40e_hw *hw)
/* pre-emptive resource lock release */
i40e_aq_release_resource(hw, I40E_NVM_RESOURCE_ID, 0, NULL);
- hw->aq.nvm_release_on_done = FALSE;
+ hw->nvm_release_on_done = FALSE;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- ret_code = i40e_aq_set_hmc_resource_profile(hw,
- I40E_HMC_PROFILE_DEFAULT,
- 0,
- NULL);
ret_code = I40E_SUCCESS;
/* success! */
@@ -1081,26 +1067,7 @@ enum i40e_status_code i40e_clean_arq_element(struct i40e_hw *hw,
hw->aq.arq.next_to_clean = ntc;
hw->aq.arq.next_to_use = ntu;
- if (i40e_is_nvm_update_op(&e->desc)) {
- if (hw->aq.nvm_release_on_done) {
- i40e_release_nvm(hw);
- hw->aq.nvm_release_on_done = FALSE;
- }
-
- switch (hw->nvmupd_state) {
- case I40E_NVMUPD_STATE_INIT_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
- break;
-
- case I40E_NVMUPD_STATE_WRITE_WAIT:
- hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
- break;
-
- default:
- break;
- }
- }
-
+ i40e_nvmupd_check_wait_event(hw, LE16_TO_CPU(e->desc.opcode));
clean_arq_element_out:
/* Set pending if needed, unlock and return */
if (pending != NULL)
diff --git a/sys/dev/ixl/i40e_adminq.h b/sys/dev/ixl/i40e_adminq.h
index 6448b8be2cf0..a0279273bd94 100644
--- a/sys/dev/ixl/i40e_adminq.h
+++ b/sys/dev/ixl/i40e_adminq.h
@@ -105,7 +105,6 @@ struct i40e_adminq_info {
u32 fw_build; /* firmware build number */
u16 api_maj_ver; /* api major version */
u16 api_min_ver; /* api minor version */
- bool nvm_release_on_done;
struct i40e_spinlock asq_spinlock; /* Send queue spinlock */
struct i40e_spinlock arq_spinlock; /* Receive queue spinlock */
diff --git a/sys/dev/ixl/i40e_adminq_cmd.h b/sys/dev/ixl/i40e_adminq_cmd.h
index 7c0ace2e6b75..764ce11fb772 100644
--- a/sys/dev/ixl/i40e_adminq_cmd.h
+++ b/sys/dev/ixl/i40e_adminq_cmd.h
@@ -140,6 +140,10 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_list_func_capabilities = 0x000A,
i40e_aqc_opc_list_dev_capabilities = 0x000B,
+ /* Proxy commands */
+ i40e_aqc_opc_set_proxy_config = 0x0104,
+ i40e_aqc_opc_set_ns_proxy_table_entry = 0x0105,
+
/* LAA */
i40e_aqc_opc_mac_address_read = 0x0107,
i40e_aqc_opc_mac_address_write = 0x0108,
@@ -147,6 +151,10 @@ enum i40e_admin_queue_opc {
/* PXE */
i40e_aqc_opc_clear_pxe_mode = 0x0110,
+ /* WoL commands */
+ i40e_aqc_opc_set_wol_filter = 0x0120,
+ i40e_aqc_opc_get_wake_reason = 0x0121,
+
/* internal switch commands */
i40e_aqc_opc_get_switch_config = 0x0200,
i40e_aqc_opc_add_statistics = 0x0201,
@@ -185,6 +193,7 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_remove_control_packet_filter = 0x025B,
i40e_aqc_opc_add_cloud_filters = 0x025C,
i40e_aqc_opc_remove_cloud_filters = 0x025D,
+ i40e_aqc_opc_clear_wol_switch_filters = 0x025E,
i40e_aqc_opc_add_mirror_rule = 0x0260,
i40e_aqc_opc_delete_mirror_rule = 0x0261,
@@ -212,7 +221,6 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_suspend_port_tx = 0x041B,
i40e_aqc_opc_resume_port_tx = 0x041C,
i40e_aqc_opc_configure_partition_bw = 0x041D,
-
/* hmc */
i40e_aqc_opc_query_hmc_resource_profile = 0x0500,
i40e_aqc_opc_set_hmc_resource_profile = 0x0501,
@@ -271,6 +279,10 @@ enum i40e_admin_queue_opc {
/* Tunnel commands */
i40e_aqc_opc_add_udp_tunnel = 0x0B00,
i40e_aqc_opc_del_udp_tunnel = 0x0B01,
+ i40e_aqc_opc_set_rss_key = 0x0B02,
+ i40e_aqc_opc_set_rss_lut = 0x0B03,
+ i40e_aqc_opc_get_rss_key = 0x0B04,
+ i40e_aqc_opc_get_rss_lut = 0x0B05,
/* Async Events */
i40e_aqc_opc_event_lan_overflow = 0x1001,
@@ -433,6 +445,7 @@ struct i40e_aqc_list_capabilities_element_resp {
#define I40E_AQ_CAP_ID_SDP 0x0062
#define I40E_AQ_CAP_ID_MDIO 0x0063
#define I40E_AQ_CAP_ID_WSR_PROT 0x0064
+#define I40E_AQ_CAP_ID_NVM_MGMT 0x0080
#define I40E_AQ_CAP_ID_FLEX10 0x00F1
#define I40E_AQ_CAP_ID_CEM 0x00F2
@@ -457,13 +470,15 @@ I40E_CHECK_CMD_LENGTH(i40e_aqc_cppm_configuration);
/* Set ARP Proxy command / response (indirect 0x0104) */
struct i40e_aqc_arp_proxy_data {
__le16 command_flags;
-#define I40E_AQ_ARP_INIT_IPV4 0x0008
-#define I40E_AQ_ARP_UNSUP_CTL 0x0010
-#define I40E_AQ_ARP_ENA 0x0020
-#define I40E_AQ_ARP_ADD_IPV4 0x0040
-#define I40E_AQ_ARP_DEL_IPV4 0x0080
+#define I40E_AQ_ARP_INIT_IPV4 0x0800
+#define I40E_AQ_ARP_UNSUP_CTL 0x1000
+#define I40E_AQ_ARP_ENA 0x2000
+#define I40E_AQ_ARP_ADD_IPV4 0x4000
+#define I40E_AQ_ARP_DEL_IPV4 0x8000
__le16 table_id;
- __le32 pfpm_proxyfc;
+ __le32 enabled_offloads;
+#define I40E_AQ_ARP_DIRECTED_OFFLOAD_ENABLE 0x00000020
+#define I40E_AQ_ARP_OFFLOAD_ENABLE 0x00000800
__le32 ip_addr;
u8 mac_addr[6];
u8 reserved[2];
@@ -478,17 +493,19 @@ struct i40e_aqc_ns_proxy_data {
__le16 table_idx_ipv6_0;
__le16 table_idx_ipv6_1;
__le16 control;
-#define I40E_AQ_NS_PROXY_ADD_0 0x0100
-#define I40E_AQ_NS_PROXY_DEL_0 0x0200
-#define I40E_AQ_NS_PROXY_ADD_1 0x0400
-#define I40E_AQ_NS_PROXY_DEL_1 0x0800
-#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x1000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x2000
-#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x4000
-#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x8000
-#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0001
-#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0002
-#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0004
+#define I40E_AQ_NS_PROXY_ADD_0 0x0001
+#define I40E_AQ_NS_PROXY_DEL_0 0x0002
+#define I40E_AQ_NS_PROXY_ADD_1 0x0004
+#define I40E_AQ_NS_PROXY_DEL_1 0x0008
+#define I40E_AQ_NS_PROXY_ADD_IPV6_0 0x0010
+#define I40E_AQ_NS_PROXY_DEL_IPV6_0 0x0020
+#define I40E_AQ_NS_PROXY_ADD_IPV6_1 0x0040
+#define I40E_AQ_NS_PROXY_DEL_IPV6_1 0x0080
+#define I40E_AQ_NS_PROXY_COMMAND_SEQ 0x0100
+#define I40E_AQ_NS_PROXY_INIT_IPV6_TBL 0x0200
+#define I40E_AQ_NS_PROXY_INIT_MAC_TBL 0x0400
+#define I40E_AQ_NS_PROXY_OFFLOAD_ENABLE 0x0800
+#define I40E_AQ_NS_PROXY_DIRECTED_OFFLOAD_ENABLE 0x1000
u8 mac_addr_0[6];
u8 mac_addr_1[6];
u8 local_mac_addr[6];
@@ -538,6 +555,7 @@ I40E_CHECK_STRUCT_LEN(24, i40e_aqc_mac_address_read_data);
/* Manage MAC Address Write Command (0x0108) */
struct i40e_aqc_mac_address_write {
__le16 command_flags;
+#define I40E_AQC_MC_MAG_EN 0x0100
#define I40E_AQC_WRITE_TYPE_LAA_ONLY 0x0000
#define I40E_AQC_WRITE_TYPE_LAA_WOL 0x4000
#define I40E_AQC_WRITE_TYPE_PORT 0x8000
@@ -561,6 +579,56 @@ struct i40e_aqc_clear_pxe {
I40E_CHECK_CMD_LENGTH(i40e_aqc_clear_pxe);
+/* Set WoL Filter (0x0120) */
+
+struct i40e_aqc_set_wol_filter {
+ __le16 filter_index;
+#define I40E_AQC_MAX_NUM_WOL_FILTERS 8
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT 15
+#define I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_MASK (0x1 << \
+ I40E_AQC_SET_WOL_FILTER_TYPE_MAGIC_SHIFT)
+
+#define I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT 0
+#define I40E_AQC_SET_WOL_FILTER_INDEX_MASK (0x7 << \
+ I40E_AQC_SET_WOL_FILTER_INDEX_SHIFT)
+ __le16 cmd_flags;
+#define I40E_AQC_SET_WOL_FILTER 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL 0x4000
+#define I40E_AQC_SET_WOL_FILTER_ACTION_CLEAR 0
+#define I40E_AQC_SET_WOL_FILTER_ACTION_SET 1
+ __le16 valid_flags;
+#define I40E_AQC_SET_WOL_FILTER_ACTION_VALID 0x8000
+#define I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID 0x4000
+ u8 reserved[2];
+ __le32 address_high;
+ __le32 address_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_set_wol_filter);
+
+struct i40e_aqc_set_wol_filter_data {
+ u8 filter[128];
+ u8 mask[16];
+};
+
+I40E_CHECK_STRUCT_LEN(0x90, i40e_aqc_set_wol_filter_data);
+
+/* Get Wake Reason (0x0121) */
+
+struct i40e_aqc_get_wake_reason_completion {
+ u8 reserved_1[2];
+ __le16 wake_reason;
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT 0
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_MATCHED_INDEX_SHIFT)
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT 8
+#define I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_MASK (0xFF << \
+ I40E_AQC_GET_WAKE_UP_REASON_WOL_REASON_RESERVED_SHIFT)
+ u8 reserved_2[12];
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_wake_reason_completion);
+
/* Switch configuration commands (0x02xx) */
/* Used by many indirect commands that only pass an seid and a buffer in the
@@ -643,6 +711,8 @@ struct i40e_aqc_set_port_parameters {
#define I40E_AQ_SET_P_PARAMS_PAD_SHORT_PACKETS 2 /* must set! */
#define I40E_AQ_SET_P_PARAMS_DOUBLE_VLAN_ENA 4
__le16 bad_frame_vsi;
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_SHIFT 0x0
+#define I40E_AQ_SET_P_PARAMS_BFRAME_SEID_MASK 0x3FF
__le16 default_seid; /* reserved for command */
u8 reserved[10];
};
@@ -694,6 +764,7 @@ I40E_CHECK_STRUCT_LEN(0x10, i40e_aqc_switch_resource_alloc_element_resp);
/* Set Switch Configuration (direct 0x0205) */
struct i40e_aqc_set_switch_config {
__le16 flags;
+/* flags used for both fields below */
#define I40E_AQ_SET_SWITCH_CFG_PROMISC 0x0001
#define I40E_AQ_SET_SWITCH_CFG_L2_FILTER 0x0002
__le16 valid_flags;
@@ -862,8 +933,12 @@ struct i40e_aqc_vsi_properties_data {
I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
/* queueing option section */
u8 queueing_opt_flags;
+#define I40E_AQ_VSI_QUE_OPT_MULTICAST_UDP_ENA 0x04
+#define I40E_AQ_VSI_QUE_OPT_UNICAST_UDP_ENA 0x08
#define I40E_AQ_VSI_QUE_OPT_TCP_ENA 0x10
#define I40E_AQ_VSI_QUE_OPT_FCOE_ENA 0x20
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_PF 0x00
+#define I40E_AQ_VSI_QUE_OPT_RSS_LUT_VSI 0x40
u8 queueing_opt_reserved[3];
/* scheduler section */
u8 up_enable_bits;
@@ -1597,15 +1672,12 @@ struct i40e_aq_get_set_hmc_resource_profile {
I40E_CHECK_CMD_LENGTH(i40e_aq_get_set_hmc_resource_profile);
enum i40e_aq_hmc_profile {
- /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
+ /* I40E_HMC_PROFILE_NO_CHANGE = 0, reserved */
I40E_HMC_PROFILE_DEFAULT = 1,
I40E_HMC_PROFILE_FAVOR_VF = 2,
I40E_HMC_PROFILE_EQUAL = 3,
};
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK 0xF
-#define I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK 0x3F
-
/* Get PHY Abilities (indirect 0x0600) uses the generic indirect struct */
/* set in param0 for get phy abilities to report qualified modules */
@@ -1641,6 +1713,10 @@ enum i40e_aq_phy_type {
I40E_PHY_TYPE_1000BASE_LX = 0x1C,
I40E_PHY_TYPE_1000BASE_T_OPTICAL = 0x1D,
I40E_PHY_TYPE_20GBASE_KR2 = 0x1E,
+ I40E_PHY_TYPE_25GBASE_KR = 0x1F,
+ I40E_PHY_TYPE_25GBASE_CR = 0x20,
+ I40E_PHY_TYPE_25GBASE_SR = 0x21,
+ I40E_PHY_TYPE_25GBASE_LR = 0x22,
I40E_PHY_TYPE_MAX
};
@@ -1649,6 +1725,7 @@ enum i40e_aq_phy_type {
#define I40E_LINK_SPEED_10GB_SHIFT 0x3
#define I40E_LINK_SPEED_40GB_SHIFT 0x4
#define I40E_LINK_SPEED_20GB_SHIFT 0x5
+#define I40E_LINK_SPEED_25GB_SHIFT 0x6
enum i40e_aq_link_speed {
I40E_LINK_SPEED_UNKNOWN = 0,
@@ -1656,7 +1733,8 @@ enum i40e_aq_link_speed {
I40E_LINK_SPEED_1GB = (1 << I40E_LINK_SPEED_1000MB_SHIFT),
I40E_LINK_SPEED_10GB = (1 << I40E_LINK_SPEED_10GB_SHIFT),
I40E_LINK_SPEED_40GB = (1 << I40E_LINK_SPEED_40GB_SHIFT),
- I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT)
+ I40E_LINK_SPEED_20GB = (1 << I40E_LINK_SPEED_20GB_SHIFT),
+ I40E_LINK_SPEED_25GB = (1 << I40E_LINK_SPEED_25GB_SHIFT),
};
struct i40e_aqc_module_desc {
@@ -1689,7 +1767,13 @@ struct i40e_aq_get_phy_abilities_resp {
__le32 eeer_val;
u8 d3_lpan;
#define I40E_AQ_SET_PHY_D3_LPAN_ENA 0x01
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 mod_type_ext;
+ u8 ext_comp_code;
u8 phy_id[4];
u8 module_type[3];
u8 qualified_module_count;
@@ -1711,7 +1795,12 @@ struct i40e_aq_set_phy_config { /* same bits as above in all */
__le16 eee_capability;
__le32 eeer;
u8 low_power_ctrl;
- u8 reserved[3];
+ u8 phy_type_ext;
+#define I40E_AQ_PHY_TYPE_EXT_25G_KR 0x01
+#define I40E_AQ_PHY_TYPE_EXT_25G_CR 0x02
+#define I40E_AQ_PHY_TYPE_EXT_25G_SR 0x04
+#define I40E_AQ_PHY_TYPE_EXT_25G_LR 0x08
+ u8 reserved[2];
};
I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
@@ -1791,16 +1880,24 @@ struct i40e_aqc_get_link_status {
#define I40E_AQ_LINK_TX_DRAINED 0x01
#define I40E_AQ_LINK_TX_FLUSHED 0x03
#define I40E_AQ_LINK_FORCED_40G 0x10
+/* 25G Error Codes */
+#define I40E_AQ_25G_NO_ERR 0x00
+#define I40E_AQ_25G_NOT_PRESENT 0x01
+#define I40E_AQ_25G_NVM_CRC_ERR 0x02
+#define I40E_AQ_25G_SBUS_UCODE_ERR 0x03
+#define I40E_AQ_25G_SERDES_UCODE_ERR 0x04
+#define I40E_AQ_25G_NIMB_UCODE_ERR 0x05
u8 loopback; /* use defines from i40e_aqc_set_lb_mode */
__le16 max_frame_size;
u8 config;
#define I40E_AQ_CONFIG_CRC_ENA 0x04
#define I40E_AQ_CONFIG_PACING_MASK 0x78
- u8 external_power_ability;
+ u8 power_desc;
#define I40E_AQ_LINK_POWER_CLASS_1 0x00
#define I40E_AQ_LINK_POWER_CLASS_2 0x01
#define I40E_AQ_LINK_POWER_CLASS_3 0x02
#define I40E_AQ_LINK_POWER_CLASS_4 0x03
+#define I40E_AQ_PWR_CLASS_MASK 0x03
u8 reserved[4];
};
@@ -1857,7 +1954,10 @@ struct i40e_aqc_set_phy_debug {
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_NONE 0x00
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_HARD 0x01
#define I40E_AQ_PHY_DEBUG_RESET_EXTERNAL_SOFT 0x02
+/* Disable link manageability on a single port */
#define I40E_AQ_PHY_DEBUG_DISABLE_LINK_FW 0x10
+/* Disable link manageability on all ports needs both bits 4 and 5 */
+#define I40E_AQ_PHY_DEBUG_DISABLE_ALL_LINK_FW 0x20
u8 reserved[15];
};
@@ -2296,6 +2396,46 @@ struct i40e_aqc_del_udp_tunnel_completion {
I40E_CHECK_CMD_LENGTH(i40e_aqc_del_udp_tunnel_completion);
+struct i40e_aqc_get_set_rss_key {
+#define I40E_AQC_SET_RSS_KEY_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_KEY_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT)
+ __le16 vsi_id;
+ u8 reserved[6];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_key);
+
+struct i40e_aqc_get_set_rss_key_data {
+ u8 standard_rss_key[0x28];
+ u8 extended_hash_key[0xc];
+};
+
+I40E_CHECK_STRUCT_LEN(0x34, i40e_aqc_get_set_rss_key_data);
+
+struct i40e_aqc_get_set_rss_lut {
+#define I40E_AQC_SET_RSS_LUT_VSI_VALID (0x1 << 15)
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_VSI_ID_MASK (0x3FF << \
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT)
+ __le16 vsi_id;
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK (0x1 << \
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT)
+
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI 0
+#define I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF 1
+ __le16 flags;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+I40E_CHECK_CMD_LENGTH(i40e_aqc_get_set_rss_lut);
+
/* tunnel key structure 0x0B10 */
struct i40e_aqc_tunnel_key_structure {
diff --git a/sys/dev/ixl/i40e_common.c b/sys/dev/ixl/i40e_common.c
index 5a8a8aec73ff..79229752ca29 100644
--- a/sys/dev/ixl/i40e_common.c
+++ b/sys/dev/ixl/i40e_common.c
@@ -64,8 +64,24 @@ enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw)
case I40E_DEV_ID_10G_BASE_T4:
case I40E_DEV_ID_20G_KR2:
case I40E_DEV_ID_20G_KR2_A:
+ case I40E_DEV_ID_25G_B:
+ case I40E_DEV_ID_25G_SFP28:
hw->mac.type = I40E_MAC_XL710;
break;
+ case I40E_DEV_ID_X722_A0:
+ case I40E_DEV_ID_KX_X722:
+ case I40E_DEV_ID_QSFP_X722:
+ case I40E_DEV_ID_SFP_X722:
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ case I40E_DEV_ID_10G_BASE_T_X722:
+ case I40E_DEV_ID_SFP_I_X722:
+ hw->mac.type = I40E_MAC_X722;
+ break;
+ case I40E_DEV_ID_X722_VF:
+ case I40E_DEV_ID_X722_VF_HV:
+ case I40E_DEV_ID_X722_A0_VF:
+ hw->mac.type = I40E_MAC_X722_VF;
+ break;
case I40E_DEV_ID_VF:
case I40E_DEV_ID_VF_HV:
hw->mac.type = I40E_MAC_VF;
@@ -341,14 +357,15 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask, void *desc,
/* the most we could have left is 16 bytes, pad with zeros */
if (i < len) {
char d_buf[16];
- int j;
+ int j, i_sav;
+ i_sav = i;
memset(d_buf, 0, sizeof(d_buf));
for (j = 0; i < len; j++, i++)
d_buf[j] = buf[i];
i40e_debug(hw, mask,
"\t0x%04X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
- i, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
+ i_sav, d_buf[0], d_buf[1], d_buf[2], d_buf[3],
d_buf[4], d_buf[5], d_buf[6], d_buf[7],
d_buf[8], d_buf[9], d_buf[10], d_buf[11],
d_buf[12], d_buf[13], d_buf[14], d_buf[15]);
@@ -400,6 +417,164 @@ enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw,
return status;
}
+/**
+ * i40e_aq_get_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ * @set: set TRUE to set the table, FALSE to get the table
+ *
+ * Internal function to get or set the RSS lookup table
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_lut(struct i40e_hw *hw,
+ u16 vsi_id, bool pf_lut,
+ u8 *lut, u16 lut_size,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_lut *cmd_resp =
+ (struct i40e_aqc_get_set_rss_lut *)&desc.params.raw;
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_lut);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_lut);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_LUT_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_LUT_VSI_VALID);
+
+ if (pf_lut)
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_PF <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+ else
+ cmd_resp->flags |= CPU_TO_LE16((u16)
+ ((I40E_AQC_SET_RSS_LUT_TABLE_TYPE_VSI <<
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_SHIFT) &
+ I40E_AQC_SET_RSS_LUT_TABLE_TYPE_MASK));
+
+ status = i40e_asq_send_command(hw, &desc, lut, lut_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * get the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size,
+ FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_lut
+ * @hw: pointer to the hardware structure
+ * @vsi_id: vsi fw index
+ * @pf_lut: for PF table set TRUE, for VSI table set FALSE
+ * @lut: pointer to the lut buffer provided by the caller
+ * @lut_size: size of the lut buffer
+ *
+ * set the RSS lookup table, PF or VSI type
+ **/
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 vsi_id,
+ bool pf_lut, u8 *lut, u16 lut_size)
+{
+ return i40e_aq_get_set_rss_lut(hw, vsi_id, pf_lut, lut, lut_size, TRUE);
+}
+
+/**
+ * i40e_aq_get_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ * @set: set TRUE to set the key, FALSE to get the key
+ *
+ * get or set the RSS key per VSI
+ **/
+static enum i40e_status_code i40e_aq_get_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key,
+ bool set)
+{
+ enum i40e_status_code status;
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_set_rss_key *cmd_resp =
+ (struct i40e_aqc_get_set_rss_key *)&desc.params.raw;
+ u16 key_size = sizeof(struct i40e_aqc_get_set_rss_key_data);
+
+ if (set)
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_rss_key);
+ else
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_get_rss_key);
+
+ /* Indirect command */
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_BUF);
+ desc.flags |= CPU_TO_LE16((u16)I40E_AQ_FLAG_RD);
+
+ cmd_resp->vsi_id =
+ CPU_TO_LE16((u16)((vsi_id <<
+ I40E_AQC_SET_RSS_KEY_VSI_ID_SHIFT) &
+ I40E_AQC_SET_RSS_KEY_VSI_ID_MASK));
+ cmd_resp->vsi_id |= CPU_TO_LE16((u16)I40E_AQC_SET_RSS_KEY_VSI_VALID);
+
+ status = i40e_asq_send_command(hw, &desc, key, key_size, NULL);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * get the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, FALSE);
+}
+
+/**
+ * i40e_aq_set_rss_key
+ * @hw: pointer to the hw struct
+ * @vsi_id: vsi fw index
+ * @key: pointer to key info struct
+ *
+ * set the RSS key per VSI
+ **/
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 vsi_id,
+ struct i40e_aqc_get_set_rss_key_data *key)
+{
+ return i40e_aq_get_set_rss_key(hw, vsi_id, key, TRUE);
+}
+
/* The i40e_ptype_lookup table is used to convert from the 8-bit ptype in the
* hardware to a bit-field that can be used by SW to more easily determine the
* packet type.
@@ -563,7 +738,7 @@ struct i40e_rx_ptype_decoded i40e_ptype_lookup[] = {
/* Non Tunneled IPv6 */
I40E_PTT(88, IP, IPV6, FRG, NONE, NONE, NOF, NONE, PAY3),
I40E_PTT(89, IP, IPV6, NOF, NONE, NONE, NOF, NONE, PAY3),
- I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY3),
+ I40E_PTT(90, IP, IPV6, NOF, NONE, NONE, NOF, UDP, PAY4),
I40E_PTT_UNUSED_ENTRY(91),
I40E_PTT(92, IP, IPV6, NOF, NONE, NONE, NOF, TCP, PAY4),
I40E_PTT(93, IP, IPV6, NOF, NONE, NONE, NOF, SCTP, PAY4),
@@ -813,6 +988,7 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
switch (hw->mac.type) {
case I40E_MAC_XL710:
+ case I40E_MAC_X722:
break;
default:
return I40E_ERR_DEVICE_NOT_SUPPORTED;
@@ -832,6 +1008,9 @@ enum i40e_status_code i40e_init_shared_code(struct i40e_hw *hw)
else
hw->pf_id = (u8)(func_rid & 0x7);
+ if (hw->mac.type == I40E_MAC_X722)
+ hw->flags |= I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE;
+
status = i40e_init_nvm(hw);
return status;
}
@@ -1104,8 +1283,7 @@ enum i40e_status_code i40e_pf_reset(struct i40e_hw *hw)
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
- /* It can take upto 15 secs for GRST steady state */
- grst_del = grst_del * 20; /* bump it to 16 secs max to be safe */
+ grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT);
@@ -1452,8 +1630,10 @@ enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
if (hw->aq.asq_last_status == I40E_AQ_RC_EIO)
status = I40E_ERR_UNKNOWN_PHY;
- if (report_init)
+ if (report_init) {
hw->phy.phy_types = LE32_TO_CPU(abilities->phy_type);
+ hw->phy.phy_types |= ((u64)abilities->phy_type_ext << 32);
+ }
return status;
}
@@ -1997,15 +2177,45 @@ enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw,
}
/**
+ * i40e_aq_clear_default_vsi
+ * @hw: pointer to the hw struct
+ * @seid: vsi number
+ * @cmd_details: pointer to command details structure or NULL
+ **/
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
+ (struct i40e_aqc_set_vsi_promiscuous_modes *)
+ &desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_vsi_promiscuous_modes);
+
+ cmd->promiscuous_flags = CPU_TO_LE16(0);
+ cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_DEFAULT);
+ cmd->seid = CPU_TO_LE16(seid);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ return status;
+}
+
+/**
* i40e_aq_set_vsi_unicast_promiscuous
* @hw: pointer to the hw struct
* @seid: vsi number
* @set: set unicast promiscuous enable/disable
* @cmd_details: pointer to command details structure or NULL
+ * @rx_only_promisc: flag to decide if egress traffic gets mirrored in promisc
**/
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
u16 seid, bool set,
- struct i40e_asq_cmd_details *cmd_details)
+ struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_vsi_promiscuous_modes *cmd =
@@ -2018,8 +2228,9 @@ enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
if (set) {
flags |= I40E_AQC_SET_VSI_PROMISC_UNICAST;
- if (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
- (hw->aq.api_maj_ver > 1))
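+		/* the TX promiscuous flag is only understood by FW API 1.5+ */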
+ if (rx_only_promisc &&
+ (((hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver >= 5)) ||
+ (hw->aq.api_maj_ver > 1)))
flags |= I40E_AQC_SET_VSI_PROMISC_TX;
}
@@ -2192,7 +2403,7 @@ enum i40e_status_code i40e_aq_set_vsi_vlan_promisc(struct i40e_hw *hw,
i40e_aqc_opc_set_vsi_promiscuous_modes);
if (enable)
flags |= I40E_AQC_SET_VSI_PROMISC_VLAN;
-
+
cmd->promiscuous_flags = CPU_TO_LE16(flags);
cmd->valid_flags = CPU_TO_LE16(I40E_AQC_SET_VSI_PROMISC_VLAN);
cmd->seid = CPU_TO_LE16(seid);
@@ -2826,10 +3037,7 @@ enum i40e_status_code i40e_aq_delete_mirrorrule(struct i40e_hw *hw, u16 sw_seid,
u16 *rules_used, u16 *rules_free)
{
/* Rule ID has to be valid except rule_type: INGRESS VLAN mirroring */
- if (rule_type != I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
- if (!rule_id)
- return I40E_ERR_PARAM;
- } else {
+ if (rule_type == I40E_AQC_MIRROR_RULE_TYPE_VLAN) {
/* count and mr_list shall be valid for rule_type INGRESS VLAN
* mirroring. For other rule_type, count and rule_type should
* not matter.
@@ -3026,67 +3234,6 @@ enum i40e_status_code i40e_aq_debug_write_register(struct i40e_hw *hw,
}
/**
- * i40e_aq_get_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * query the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *resp =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_query_hmc_resource_profile);
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- *profile = (enum i40e_aq_hmc_profile)(resp->pm_profile &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_PM_MASK);
- *pe_vf_enabled_count = resp->pe_vf_enabled &
- I40E_AQ_GET_HMC_RESOURCE_PROFILE_COUNT_MASK;
-
- return status;
-}
-
-/**
- * i40e_aq_set_hmc_resource_profile
- * @hw: pointer to the hw struct
- * @profile: type of profile the HMC is to be set as
- * @pe_vf_enabled_count: the number of PE enabled VFs the system has
- * @cmd_details: pointer to command details structure or NULL
- *
- * set the HMC profile of the device.
- **/
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details)
-{
- struct i40e_aq_desc desc;
- struct i40e_aq_get_set_hmc_resource_profile *cmd =
- (struct i40e_aq_get_set_hmc_resource_profile *)&desc.params.raw;
- enum i40e_status_code status;
-
- i40e_fill_default_direct_cmd_desc(&desc,
- i40e_aqc_opc_set_hmc_resource_profile);
-
- cmd->pm_profile = (u8)profile;
- cmd->pe_vf_enabled = pe_vf_enabled_count;
-
- status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
-
- return status;
-}
-
-/**
* i40e_aq_request_resource
* @hw: pointer to the hw struct
* @resource: resource id
@@ -3603,6 +3750,26 @@ static void i40e_parse_discover_capabilities(struct i40e_hw *hw, void *buff,
"HW Capability: wr_csr_prot = 0x%llX\n\n",
(p->wr_csr_prot & 0xffff));
break;
+ case I40E_AQ_CAP_ID_NVM_MGMT:
+ if (number & I40E_NVM_MGMT_SEC_REV_DISABLED)
+ p->sec_rev_disabled = TRUE;
+ if (number & I40E_NVM_MGMT_UPDATE_DISABLED)
+ p->update_disabled = TRUE;
+ break;
+ case I40E_AQ_CAP_ID_WOL_AND_PROXY:
+ hw->num_wol_proxy_filters = (u16)number;
+ hw->wol_proxy_vsi_seid = (u16)logical_id;
+ p->apm_wol_support = phys_id & I40E_WOL_SUPPORT_MASK;
+ if (phys_id & I40E_ACPI_PROGRAMMING_METHOD_MASK)
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK;
+ else
+ p->acpi_prog_method = I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
+ p->proxy_support = (phys_id & I40E_PROXY_SUPPORT_MASK) ? 1 : 0;
+ i40e_debug(hw, I40E_DEBUG_INIT,
+ "HW Capability: WOL proxy filters = %d\n",
+ hw->num_wol_proxy_filters);
+ break;
default:
break;
}
@@ -5211,6 +5378,35 @@ void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
}
/**
+ * i40e_fix_up_geneve_vni - adjust Geneve VNI for HW issue
+ * @filters: list of cloud filters
+ * @filter_count: length of list
+ *
+ * There's an issue in the device where the Geneve VNI layout needs
+ * to be shifted 1 byte over from the VxLAN VNI
+ **/
+static void i40e_fix_up_geneve_vni(
+ struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
+ u8 filter_count)
+{
+ struct i40e_aqc_add_remove_cloud_filters_element_data *f = filters;
+ int i;
+
+ for (i = 0; i < filter_count; i++) {
+ u16 tnl_type;
+ u32 ti;
+
+ tnl_type = (LE16_TO_CPU(f[i].flags) &
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_MASK) >>
+ I40E_AQC_ADD_CLOUD_TNL_TYPE_SHIFT;
+ if (tnl_type == I40E_AQC_ADD_CLOUD_TNL_TYPE_GENEVE) {
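+			/* move the 24-bit VNI up one byte to the layout HW expects */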
+ ti = LE32_TO_CPU(f[i].tenant_id);
+ f[i].tenant_id = CPU_TO_LE32(ti << 8);
+ }
+ }
+}
+
+/**
* i40e_aq_add_cloud_filters
* @hw: pointer to the hardware structure
* @seid: VSI seid to add cloud filters from
@@ -5230,8 +5426,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
struct i40e_aq_desc desc;
struct i40e_aqc_add_remove_cloud_filters *cmd =
(struct i40e_aqc_add_remove_cloud_filters *)&desc.params.raw;
- u16 buff_len;
enum i40e_status_code status;
+ u16 buff_len;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_add_cloud_filters);
@@ -5242,6 +5438,8 @@ enum i40e_status_code i40e_aq_add_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@@ -5279,6 +5477,8 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
cmd->num_filters = filter_count;
cmd->seid = CPU_TO_LE16(seid);
+ i40e_fix_up_geneve_vni(filters, filter_count);
+
status = i40e_asq_send_command(hw, &desc, filters, buff_len, NULL);
return status;
@@ -6263,3 +6463,158 @@ enum i40e_status_code i40e_vf_reset(struct i40e_hw *hw)
return i40e_aq_send_msg_to_pf(hw, I40E_VIRTCHNL_OP_RESET_VF,
I40E_SUCCESS, NULL, 0, NULL);
}
+
+/**
+ * i40e_aq_set_arp_proxy_config
+ * @hw: pointer to the HW structure
+ * @proxy_config: pointer to proxy config command table struct
+ * @cmd_details: pointer to command details
+ *
+ * Set ARP offload parameters from pre-populated
+ * i40e_aqc_arp_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!proxy_config)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_proxy_config);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)proxy_config));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)proxy_config));
+
+ status = i40e_asq_send_command(hw, &desc, proxy_config,
+ sizeof(struct i40e_aqc_arp_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_ns_proxy_table_entry
+ * @hw: pointer to the HW structure
+ * @ns_proxy_table_entry: pointer to NS table entry command struct
+ * @cmd_details: pointer to command details
+ *
+ * Set IPv6 Neighbor Solicitation (NS) protocol offload parameters
+ * from pre-populated i40e_aqc_ns_proxy_data struct
+ **/
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ enum i40e_status_code status;
+
+ if (!ns_proxy_table_entry)
+ return I40E_ERR_PARAM;
+
+ i40e_fill_default_direct_cmd_desc(&desc,
+ i40e_aqc_opc_set_ns_proxy_table_entry);
+
+ desc.params.external.addr_high =
+ CPU_TO_LE32(I40E_HI_DWORD((u64)ns_proxy_table_entry));
+ desc.params.external.addr_low =
+ CPU_TO_LE32(I40E_LO_DWORD((u64)ns_proxy_table_entry));
+
+ status = i40e_asq_send_command(hw, &desc, ns_proxy_table_entry,
+ sizeof(struct i40e_aqc_ns_proxy_data),
+ cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_set_clear_wol_filter
+ * @hw: pointer to the hw struct
+ * @filter_index: index of filter to modify (0-7)
+ * @filter: buffer containing filter to be set
+ * @set_filter: TRUE to set filter, FALSE to clear filter
+ * @no_wol_tco: if TRUE, pass through packets cannot cause wake-up
+ * if FALSE, pass through packets may cause wake-up
+ * @filter_valid: TRUE if filter action is valid
+ * @no_wol_tco_valid: TRUE if no WoL in TCO traffic action valid
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Set or clear WoL filter for port attached to the PF
+ **/
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_set_wol_filter *cmd =
+ (struct i40e_aqc_set_wol_filter *)&desc.params.raw;
+ enum i40e_status_code status;
+ u16 cmd_flags = 0;
+ u16 valid_flags = 0;
+ u16 buff_len = 0;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_wol_filter);
+
+ if (filter_index >= I40E_AQC_MAX_NUM_WOL_FILTERS)
+ return I40E_ERR_PARAM;
+ cmd->filter_index = CPU_TO_LE16(filter_index);
+
+ if (set_filter) {
+ if (!filter)
+ return I40E_ERR_PARAM;
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER;
+ buff_len = sizeof(*filter);
+ }
+ if (no_wol_tco)
+ cmd_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_WOL;
+ cmd->cmd_flags = CPU_TO_LE16(cmd_flags);
+
+ if (filter_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_ACTION_VALID;
+ if (no_wol_tco_valid)
+ valid_flags |= I40E_AQC_SET_WOL_FILTER_NO_TCO_ACTION_VALID;
+ cmd->valid_flags = CPU_TO_LE16(valid_flags);
+
+ cmd->address_high = CPU_TO_LE32(I40E_HI_DWORD((u64)filter));
+ cmd->address_low = CPU_TO_LE32(I40E_LO_DWORD((u64)filter));
+
+ status = i40e_asq_send_command(hw, &desc, filter,
+ buff_len, cmd_details);
+
+ return status;
+}
+
+/**
+ * i40e_aq_get_wake_event_reason
+ * @hw: pointer to the hw struct
+ * @wake_reason: return value, index of matching filter
+ * @cmd_details: pointer to command details structure or NULL
+ *
+ * Get information for the reason of a Wake Up event
+ **/
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details)
+{
+ struct i40e_aq_desc desc;
+ struct i40e_aqc_get_wake_reason_completion *resp =
+ (struct i40e_aqc_get_wake_reason_completion *)&desc.params.raw;
+ enum i40e_status_code status;
+
+ i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_get_wake_reason);
+
+ status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
+
+ if (status == I40E_SUCCESS)
+ *wake_reason = LE16_TO_CPU(resp->wake_reason);
+
+ return status;
+}
+
diff --git a/sys/dev/ixl/i40e_devids.h b/sys/dev/ixl/i40e_devids.h
index a898f6d53d49..5725cb96754c 100644
--- a/sys/dev/ixl/i40e_devids.h
+++ b/sys/dev/ixl/i40e_devids.h
@@ -50,8 +50,20 @@
#define I40E_DEV_ID_20G_KR2 0x1587
#define I40E_DEV_ID_20G_KR2_A 0x1588
#define I40E_DEV_ID_10G_BASE_T4 0x1589
+#define I40E_DEV_ID_25G_B 0x158A
+#define I40E_DEV_ID_25G_SFP28 0x158B
#define I40E_DEV_ID_VF 0x154C
#define I40E_DEV_ID_VF_HV 0x1571
+#define I40E_DEV_ID_X722_A0 0x374C
+#define I40E_DEV_ID_X722_A0_VF 0x374D
+#define I40E_DEV_ID_KX_X722 0x37CE
+#define I40E_DEV_ID_QSFP_X722 0x37CF
+#define I40E_DEV_ID_SFP_X722 0x37D0
+#define I40E_DEV_ID_1G_BASE_T_X722 0x37D1
+#define I40E_DEV_ID_10G_BASE_T_X722 0x37D2
+#define I40E_DEV_ID_SFP_I_X722 0x37D3
+#define I40E_DEV_ID_X722_VF 0x37CD
+#define I40E_DEV_ID_X722_VF_HV 0x37D9
#define i40e_is_40G_device(d) ((d) == I40E_DEV_ID_QSFP_A || \
(d) == I40E_DEV_ID_QSFP_B || \
diff --git a/sys/dev/ixl/i40e_nvm.c b/sys/dev/ixl/i40e_nvm.c
index 4556d1734f81..151691ec97e2 100644
--- a/sys/dev/ixl/i40e_nvm.c
+++ b/sys/dev/ixl/i40e_nvm.c
@@ -220,7 +220,15 @@ enum i40e_status_code i40e_read_nvm_word(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
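+	/* X722 parts set AQ_SRCTL_ACCESS_ENABLE and must read the NVM via the AdminQ */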
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ }
return ret_code;
}
@@ -238,7 +246,10 @@ enum i40e_status_code __i40e_read_nvm_word(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_word_aq(hw, offset, data);
+ else
+ ret_code = i40e_read_nvm_word_srctl(hw, offset, data);
return ret_code;
}
@@ -330,7 +341,10 @@ enum i40e_status_code __i40e_read_nvm_buffer(struct i40e_hw *hw,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE)
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words, data);
+ else
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
return ret_code;
}
@@ -350,7 +364,16 @@ enum i40e_status_code i40e_read_nvm_buffer(struct i40e_hw *hw, u16 offset,
{
enum i40e_status_code ret_code = I40E_SUCCESS;
- ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ if (hw->flags & I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE) {
+ ret_code = i40e_acquire_nvm(hw, I40E_RESOURCE_READ);
+ if (!ret_code) {
+ ret_code = i40e_read_nvm_buffer_aq(hw, offset, words,
+ data);
+ i40e_release_nvm(hw);
+ }
+ } else {
+ ret_code = i40e_read_nvm_buffer_srctl(hw, offset, words, data);
+ }
return ret_code;
}
@@ -834,10 +857,10 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
/* early check for status command and debug msgs */
upd_cmd = i40e_nvmupd_validate_command(hw, cmd, perrno);
- i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_hold %d cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
+ i40e_debug(hw, I40E_DEBUG_NVM, "%s state %d nvm_release_on_done %d opc 0x%04x cmd 0x%08x config 0x%08x offset 0x%08x data_size 0x%08x\n",
i40e_nvm_update_state_str[upd_cmd],
hw->nvmupd_state,
- hw->aq.nvm_release_on_done,
+ hw->nvm_release_on_done, hw->nvm_wait_opcode,
cmd->command, cmd->config, cmd->offset, cmd->data_size);
if (upd_cmd == I40E_NVMUPD_INVALID) {
@@ -851,7 +874,18 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
* going into the state machine
*/
if (upd_cmd == I40E_NVMUPD_STATUS) {
+ if (!cmd->data_size) {
+ *perrno = -EFAULT;
+ return I40E_ERR_BUF_TOO_SHORT;
+ }
+
bytes[0] = hw->nvmupd_state;
+
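+		/* if the buffer has room, also report the opcode being waited on */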
+ if (cmd->data_size >= 4) {
+ bytes[1] = 0;
+ *((u16 *)&bytes[2]) = hw->nvm_wait_opcode;
+ }
+
return I40E_SUCCESS;
}
@@ -870,6 +904,14 @@ enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
case I40E_NVMUPD_STATE_INIT_WAIT:
case I40E_NVMUPD_STATE_WRITE_WAIT:
+ /* if we need to stop waiting for an event, clear
+ * the wait info and return before doing anything else
+ */
+ if (cmd->offset == 0xffff) {
+ i40e_nvmupd_check_wait_event(hw, hw->nvm_wait_opcode);
+ return I40E_SUCCESS;
+ }
+
status = I40E_ERR_NOT_READY;
*perrno = -EBUSY;
break;
@@ -942,7 +984,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_erase;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -958,7 +1001,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
if (status) {
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -971,10 +1015,12 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
hw->aq.asq_last_status);
} else {
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (status)
+ if (status) {
i40e_release_nvm(hw);
- else
+ } else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
}
break;
@@ -992,7 +1038,8 @@ static enum i40e_status_code i40e_nvmupd_state_init(struct i40e_hw *hw,
-EIO;
i40e_release_nvm(hw);
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
}
@@ -1087,8 +1134,10 @@ retry:
switch (upd_cmd) {
case I40E_NVMUPD_WRITE_CON:
status = i40e_nvmupd_nvm_write(hw, cmd, bytes, perrno);
- if (!status)
+ if (!status) {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
+ }
break;
case I40E_NVMUPD_WRITE_LCB:
@@ -1100,7 +1149,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1115,6 +1165,7 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_WRITE_WAIT;
}
break;
@@ -1129,7 +1180,8 @@ retry:
-EIO;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
} else {
- hw->aq.nvm_release_on_done = TRUE;
+ hw->nvm_release_on_done = TRUE;
+ hw->nvm_wait_opcode = i40e_aqc_opc_nvm_update;
hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
}
break;
@@ -1179,6 +1231,38 @@ retry:
}
/**
+ * i40e_nvmupd_check_wait_event - handle NVM update operation events
+ * @hw: pointer to the hardware structure
+ * @opcode: the event that just happened
+ **/
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode)
+{
+ if (opcode == hw->nvm_wait_opcode) {
+
+ i40e_debug(hw, I40E_DEBUG_NVM,
+ "NVMUPD: clearing wait on opcode 0x%04x\n", opcode);
+ if (hw->nvm_release_on_done) {
+ i40e_release_nvm(hw);
+ hw->nvm_release_on_done = FALSE;
+ }
+ hw->nvm_wait_opcode = 0;
+
+ switch (hw->nvmupd_state) {
+ case I40E_NVMUPD_STATE_INIT_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT;
+ break;
+
+ case I40E_NVMUPD_STATE_WRITE_WAIT:
+ hw->nvmupd_state = I40E_NVMUPD_STATE_WRITING;
+ break;
+
+ default:
+ break;
+ }
+ }
+}
+
+/**
* i40e_nvmupd_validate_command - Validate given command
* @hw: pointer to hardware structure
* @cmd: pointer to nvm update command buffer
@@ -1340,6 +1424,12 @@ static enum i40e_status_code i40e_nvmupd_exec_aq(struct i40e_hw *hw,
*perrno = i40e_aq_rc_to_posix(status, hw->aq.asq_last_status);
}
+ /* should we wait for a followup event? */
+ if (cmd->offset) {
+ hw->nvm_wait_opcode = cmd->offset;
+ hw->nvmupd_state = I40E_NVMUPD_STATE_INIT_WAIT;
+ }
+
return status;
}
diff --git a/sys/dev/ixl/i40e_osdep.c b/sys/dev/ixl/i40e_osdep.c
index 9b77627942e7..2a771515a9ac 100644
--- a/sys/dev/ixl/i40e_osdep.c
+++ b/sys/dev/ixl/i40e_osdep.c
@@ -32,7 +32,7 @@
******************************************************************************/
/*$FreeBSD$*/
-#include <machine/stdarg.h>
+#include <sys/limits.h>
#include "ixl.h"
@@ -137,7 +137,7 @@ void
i40e_init_spinlock(struct i40e_spinlock *lock)
{
mtx_init(&lock->mutex, "mutex",
- MTX_NETWORK_LOCK, MTX_DEF | MTX_DUPOK);
+ "ixl spinlock", MTX_DEF | MTX_DUPOK);
}
void
@@ -159,11 +159,34 @@ i40e_destroy_spinlock(struct i40e_spinlock *lock)
mtx_destroy(&lock->mutex);
}
+void
+i40e_msec_pause(int msecs)
+{
+ int ticks_to_pause = (msecs * hz) / 1000;
+ int start_ticks = ticks;
+
+ if (cold || SCHEDULER_STOPPED()) {
+ i40e_msec_delay(msecs);
+ return;
+ }
+
+ while (1) {
+ kern_yield(PRI_USER);
+ int yielded_ticks = ticks - start_ticks;
+ if (yielded_ticks > ticks_to_pause)
+ break;
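+		/* ticks is signed and wraps; a negative delta that still
+		 * exceeds the pause once corrected for the wrap ends it too */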
+ else if (yielded_ticks < 0
+ && (yielded_ticks + INT_MAX + 1 > ticks_to_pause)) {
+ break;
+ }
+ }
+}
+
/*
* Helper function for debug statement printing
*/
void
-i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
+i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask, char *fmt, ...)
{
va_list args;
diff --git a/sys/dev/ixl/i40e_osdep.h b/sys/dev/ixl/i40e_osdep.h
index f5ca7f461d30..7f9873d94aba 100644
--- a/sys/dev/ixl/i40e_osdep.h
+++ b/sys/dev/ixl/i40e_osdep.h
@@ -55,7 +55,7 @@
#include <dev/pci/pcireg.h>
#define i40e_usec_delay(x) DELAY(x)
-#define i40e_msec_delay(x) DELAY(1000*(x))
+#define i40e_msec_delay(x) DELAY(1000 * (x))
#define DBG 0
#define MSGOUT(S, A, B) printf(S "\n", A, B)
@@ -75,12 +75,13 @@
#define DEBUGOUT7(S,A,B,C,D,E,F,G)
#endif
-#define UNREFERENCED_XPARAMETER
+/* Remove unused shared code macros */
#define UNREFERENCED_PARAMETER(_p)
#define UNREFERENCED_1PARAMETER(_p)
#define UNREFERENCED_2PARAMETER(_p, _q)
#define UNREFERENCED_3PARAMETER(_p, _q, _r)
#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s)
+#define UNREFERENCED_5PARAMETER(_p, _q, _r, _s, _t)
#define STATIC static
#define INLINE inline
@@ -110,9 +111,6 @@
#define FIELD_SIZEOF(x, y) (sizeof(((x*)0)->y))
-#define BIT(a) (1UL << (a))
-#define BIT_ULL(a) (1ULL << (a))
-
typedef uint8_t u8;
typedef int8_t s8;
typedef uint16_t u16;
@@ -153,7 +151,7 @@ struct i40e_osdep {
bus_space_handle_t mem_bus_space_handle;
bus_size_t mem_bus_space_size;
uint32_t flush_reg;
- struct device *dev;
+ device_t dev;
};
struct i40e_dma_mem {
@@ -180,10 +178,13 @@ void i40e_write_pci_cfg(struct i40e_hw *, u32, u16);
** i40e_debug - OS dependent version of shared code debug printing
*/
enum i40e_debug_mask;
-#define i40e_debug(h, m, s, ...) i40e_debug_d(h, m, s, ##__VA_ARGS__)
-extern void i40e_debug_d(struct i40e_hw *hw, enum i40e_debug_mask mask,
+#define i40e_debug(h, m, s, ...) i40e_debug_shared(h, m, s, ##__VA_ARGS__)
+extern void i40e_debug_shared(struct i40e_hw *hw, enum i40e_debug_mask mask,
char *fmt_str, ...);
+/* Non-busy-wait that uses kern_yield() */
+void i40e_msec_pause(int);
+
/*
** This hardware supports either 16 or 32 byte rx descriptors;
** the driver only uses the 32 byte kind.
diff --git a/sys/dev/ixl/i40e_prototype.h b/sys/dev/ixl/i40e_prototype.h
index c53945fa30c2..01d11d6335b2 100644
--- a/sys/dev/ixl/i40e_prototype.h
+++ b/sys/dev/ixl/i40e_prototype.h
@@ -77,6 +77,17 @@ void i40e_debug_aq(struct i40e_hw *hw, enum i40e_debug_mask mask,
void i40e_idle_aq(struct i40e_hw *hw);
bool i40e_check_asq_alive(struct i40e_hw *hw);
enum i40e_status_code i40e_aq_queue_shutdown(struct i40e_hw *hw, bool unloading);
+
+enum i40e_status_code i40e_aq_get_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_set_rss_lut(struct i40e_hw *hw, u16 seid,
+ bool pf_lut, u8 *lut, u16 lut_size);
+enum i40e_status_code i40e_aq_get_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
+enum i40e_status_code i40e_aq_set_rss_key(struct i40e_hw *hw,
+ u16 seid,
+ struct i40e_aqc_get_set_rss_key_data *key);
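+
+/*
+ * Usage sketch for the RSS AQ interface (illustrative only; assumes an
+ * initialized i40e_hw and a valid VSI seid):
+ *
+ *	struct i40e_aqc_get_set_rss_key_data key;
+ *
+ *	memset(&key, 0, sizeof(key));
+ *	(fill key.standard_rss_key with the hash key, then)
+ *	if (i40e_aq_set_rss_key(hw, seid, &key) != I40E_SUCCESS)
+ *		(fall back to configuring RSS through registers)
+ */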
const char *i40e_aq_str(struct i40e_hw *hw, enum i40e_admin_queue_err aq_err);
const char *i40e_stat_str(struct i40e_hw *hw, enum i40e_status_code stat_err);
@@ -107,6 +118,8 @@ enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_default_vsi(struct i40e_hw *hw, u16 vsi_id,
struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_clear_default_vsi(struct i40e_hw *hw, u16 vsi_id,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_get_phy_capabilities(struct i40e_hw *hw,
bool qualified_modules, bool report_init,
struct i40e_aq_get_phy_abilities_resp *abilities,
@@ -149,7 +162,8 @@ enum i40e_status_code i40e_aq_set_vsi_broadcast(struct i40e_hw *hw,
u16 vsi_id, bool set_filter,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_unicast_promiscuous(struct i40e_hw *hw,
- u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
+ u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details,
+ bool rx_only_promisc);
enum i40e_status_code i40e_aq_set_vsi_multicast_promiscuous(struct i40e_hw *hw,
u16 vsi_id, bool set, struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_set_vsi_mc_promisc_on_vlan(struct i40e_hw *hw,
@@ -329,10 +343,6 @@ enum i40e_status_code i40e_aq_config_vsi_bw_limit(struct i40e_hw *hw,
enum i40e_status_code i40e_aq_dcb_ignore_pfc(struct i40e_hw *hw,
u8 tcmap, bool request, u8 *tcmap_ret,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_get_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile *profile,
- u8 *pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_ets_bw_limit(
struct i40e_hw *hw, u16 seid,
struct i40e_aqc_configure_switching_comp_ets_bw_limit_data *bw_data,
@@ -343,10 +353,6 @@ enum i40e_status_code i40e_aq_config_vsi_ets_sla_bw_limit(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_dcb_updated(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details);
-enum i40e_status_code i40e_aq_set_hmc_resource_profile(struct i40e_hw *hw,
- enum i40e_aq_hmc_profile profile,
- u8 pe_vf_enabled_count,
- struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_aq_config_switch_comp_bw_limit(struct i40e_hw *hw,
u16 seid, u16 credit, u8 max_bw,
struct i40e_asq_cmd_details *cmd_details);
@@ -386,7 +392,6 @@ enum i40e_status_code i40e_aq_remove_cloud_filters(struct i40e_hw *hw,
u16 vsi,
struct i40e_aqc_add_remove_cloud_filters_element_data *filters,
u8 filter_count);
-
enum i40e_status_code i40e_aq_alternate_read(struct i40e_hw *hw,
u32 reg_addr0, u32 *reg_val0,
u32 reg_addr1, u32 *reg_val1);
@@ -448,6 +453,7 @@ enum i40e_status_code i40e_validate_nvm_checksum(struct i40e_hw *hw,
enum i40e_status_code i40e_nvmupd_command(struct i40e_hw *hw,
struct i40e_nvm_access *cmd,
u8 *bytes, int *);
+void i40e_nvmupd_check_wait_event(struct i40e_hw *hw, u16 opcode);
void i40e_set_pci_config_data(struct i40e_hw *hw, u16 link_status);
enum i40e_status_code i40e_set_mac_type(struct i40e_hw *hw);
@@ -496,9 +502,26 @@ enum i40e_status_code i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
+enum i40e_status_code i40e_aq_set_arp_proxy_config(struct i40e_hw *hw,
+ struct i40e_aqc_arp_proxy_data *proxy_config,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_ns_proxy_table_entry(struct i40e_hw *hw,
+ struct i40e_aqc_ns_proxy_data *ns_proxy_table_entry,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_set_clear_wol_filter(struct i40e_hw *hw,
+ u8 filter_index,
+ struct i40e_aqc_set_wol_filter_data *filter,
+ bool set_filter, bool no_wol_tco,
+ bool filter_valid, bool no_wol_tco_valid,
+ struct i40e_asq_cmd_details *cmd_details);
+enum i40e_status_code i40e_aq_get_wake_event_reason(struct i40e_hw *hw,
+ u16 *wake_reason,
+ struct i40e_asq_cmd_details *cmd_details);
enum i40e_status_code i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
enum i40e_status_code i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
+enum i40e_status_code i40e_blink_phy_link_led(struct i40e_hw *hw,
+ u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */
diff --git a/sys/dev/ixl/i40e_register.h b/sys/dev/ixl/i40e_register.h
index 9fbfc7b826c2..fb41ea23a3d0 100644
--- a/sys/dev/ixl/i40e_register.h
+++ b/sys/dev/ixl/i40e_register.h
@@ -3398,4 +3398,1966 @@
#define I40E_VFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_VFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
#define I40E_VFQF_HREGION_REGION_7_SHIFT 29
#define I40E_VFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_VFQF_HREGION_REGION_7_SHIFT)
+
+#define I40E_MNGSB_FDCRC 0x000B7050 /* Reset: POR */
+#define I40E_MNGSB_FDCRC_CRC_RES_SHIFT 0
+#define I40E_MNGSB_FDCRC_CRC_RES_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCRC_CRC_RES_SHIFT)
+#define I40E_MNGSB_FDCS 0x000B7040 /* Reset: POR */
+#define I40E_MNGSB_FDCS_CRC_CONT_SHIFT 2
+#define I40E_MNGSB_FDCS_CRC_CONT_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_CONT_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT 3
+#define I40E_MNGSB_FDCS_CRC_SEED_EN_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_SEED_EN_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT 4
+#define I40E_MNGSB_FDCS_CRC_WR_INH_MASK I40E_MASK(0x1, I40E_MNGSB_FDCS_CRC_WR_INH_SHIFT)
+#define I40E_MNGSB_FDCS_CRC_SEED_SHIFT 8
+#define I40E_MNGSB_FDCS_CRC_SEED_MASK I40E_MASK(0xFF, I40E_MNGSB_FDCS_CRC_SEED_SHIFT)
+#define I40E_MNGSB_FDS 0x000B7048 /* Reset: POR */
+#define I40E_MNGSB_FDS_START_BC_SHIFT 0
+#define I40E_MNGSB_FDS_START_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_START_BC_SHIFT)
+#define I40E_MNGSB_FDS_LAST_BC_SHIFT 16
+#define I40E_MNGSB_FDS_LAST_BC_MASK I40E_MASK(0xFFF, I40E_MNGSB_FDS_LAST_BC_SHIFT)
+
+#define I40E_GL_VF_CTRL_RX(_VF) (0x00083600 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_RX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_RX_AQ_RX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_RX_AQ_RX_EN_SHIFT)
+#define I40E_GL_VF_CTRL_TX(_VF) (0x00083400 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: EMPR */
+#define I40E_GL_VF_CTRL_TX_MAX_INDEX 127
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT 0
+#define I40E_GL_VF_CTRL_TX_AQ_TX_EN_MASK I40E_MASK(0x1, I40E_GL_VF_CTRL_TX_AQ_TX_EN_SHIFT)
+
+#define I40E_GLCM_LAN_CACHESIZE 0x0010C4D8 /* Reset: CORER */
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_LAN_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_LAN_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_LAN_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_LAN_CACHESIZE_WAYS_MASK I40E_MASK(0x3FF, I40E_GLCM_LAN_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE 0x00138FE4 /* Reset: CORER */
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLCM_PE_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFFF, I40E_GLCM_PE_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_SETS_SHIFT 12
+#define I40E_GLCM_PE_CACHESIZE_SETS_MASK I40E_MASK(0xF, I40E_GLCM_PE_CACHESIZE_SETS_SHIFT)
+#define I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT 16
+#define I40E_GLCM_PE_CACHESIZE_WAYS_MASK I40E_MASK(0x1FF, I40E_GLCM_PE_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFCM_PE_ERRDATA 0x00138D00 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT 0
+#define I40E_PFCM_PE_ERRDATA_ERROR_CODE_MASK I40E_MASK(0xF, I40E_PFCM_PE_ERRDATA_ERROR_CODE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT 4
+#define I40E_PFCM_PE_ERRDATA_Q_TYPE_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRDATA_Q_TYPE_SHIFT)
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT 8
+#define I40E_PFCM_PE_ERRDATA_Q_NUM_MASK I40E_MASK(0x3FFFF, I40E_PFCM_PE_ERRDATA_Q_NUM_SHIFT)
+#define I40E_PFCM_PE_ERRINFO 0x00138C80 /* Reset: PFR */
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT 0
+#define I40E_PFCM_PE_ERRINFO_ERROR_VALID_MASK I40E_MASK(0x1, I40E_PFCM_PE_ERRINFO_ERROR_VALID_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT 4
+#define I40E_PFCM_PE_ERRINFO_ERROR_INST_MASK I40E_MASK(0x7, I40E_PFCM_PE_ERRINFO_ERROR_INST_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT 8
+#define I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_DBL_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT 16
+#define I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLU_ERROR_CNT_SHIFT)
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT 24
+#define I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_MASK I40E_MASK(0xFF, I40E_PFCM_PE_ERRINFO_RLS_ERROR_CNT_SHIFT)
+
+#define I40E_PRTDCB_TFMSTC(_i) (0x000A0040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PRTDCB_TFMSTC_MAX_INDEX 7
+#define I40E_PRTDCB_TFMSTC_MSTC_SHIFT 0
+#define I40E_PRTDCB_TFMSTC_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TFMSTC_MSTC_SHIFT)
+#define I40E_GL_FWSTS_FWROWD_SHIFT 8
+#define I40E_GL_FWSTS_FWROWD_MASK I40E_MASK(0x1, I40E_GL_FWSTS_FWROWD_SHIFT)
+#define I40E_GLFOC_CACHESIZE 0x000AA0DC /* Reset: CORER */
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLFOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLFOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLFOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLFOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLFOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLFOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLFOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLFOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLHMC_APBVTINUSEBASE(_i) (0x000C4a00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_APBVTINUSEBASE_MAX_INDEX 15
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_APBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_CEQPART(_i) (0x001312C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_CEQPART_MAX_INDEX 15
+#define I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_CEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_CEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_CEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_CEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_DBCQMAX 0x000C20F0 /* Reset: CORER */
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT 0
+#define I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_MASK I40E_MASK(0x3FFFF, I40E_GLHMC_DBCQMAX_GLHMC_DBCQMAX_SHIFT)
+#define I40E_GLHMC_DBCQPART(_i) (0x00131240 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBCQPART_MAX_INDEX 15
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_DBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_DBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_DBQPMAX 0x000C20EC /* Reset: CORER */
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT 0
+#define I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_MASK I40E_MASK(0x7FFFF, I40E_GLHMC_DBQPMAX_GLHMC_DBQPMAX_SHIFT)
+#define I40E_GLHMC_DBQPPART(_i) (0x00138D80 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_DBQPPART_MAX_INDEX 15
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_DBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_DBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_DBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_DBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_PEARPBASE(_i) (0x000C4800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_PEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_PEARPCNT(_i) (0x000C4900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEARPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_PEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_PEARPMAX 0x000C2038 /* Reset: CORER */
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT 0
+#define I40E_GLHMC_PEARPMAX_PMPEARPMAX_MASK I40E_MASK(0x1FFFF, I40E_GLHMC_PEARPMAX_PMPEARPMAX_SHIFT)
+#define I40E_GLHMC_PEARPOBJSZ 0x000C2034 /* Reset: CORER */
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_MASK I40E_MASK(0x7, I40E_GLHMC_PEARPOBJSZ_PMPEARPOBJSZ_SHIFT)
+#define I40E_GLHMC_PECQBASE(_i) (0x000C4200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_PECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_PECQCNT(_i) (0x000C4300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PECQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_PECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_PECQOBJSZ 0x000C2020 /* Reset: CORER */
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PECQOBJSZ_PMPECQOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTCNT(_i) (0x000C4700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_PEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_PEHTEBASE(_i) (0x000C4600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEHTEBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_PEHTEOBJSZ 0x000C202c /* Reset: CORER */
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEHTEOBJSZ_PMPEHTEOBJSZ_SHIFT)
+#define I40E_GLHMC_PEHTMAX 0x000C2030 /* Reset: CORER */
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT 0
+#define I40E_GLHMC_PEHTMAX_PMPEHTMAX_MASK I40E_MASK(0x1FFFFF, I40E_GLHMC_PEHTMAX_PMPEHTMAX_SHIFT)
+#define I40E_GLHMC_PEMRBASE(_i) (0x000C4c00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_PEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_PEMRCNT(_i) (0x000C4d00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEMRCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_PEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_PEMRMAX 0x000C2040 /* Reset: CORER */
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT 0
+#define I40E_GLHMC_PEMRMAX_PMPEMRMAX_MASK I40E_MASK(0x7FFFFF, I40E_GLHMC_PEMRMAX_PMPEMRMAX_SHIFT)
+#define I40E_GLHMC_PEMROBJSZ 0x000C203c /* Reset: CORER */
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT 0
+#define I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEMROBJSZ_PMPEMROBJSZ_SHIFT)
+#define I40E_GLHMC_PEPBLBASE(_i) (0x000C5800 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_PEPBLCNT(_i) (0x000C5900 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEPBLCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_PEPBLMAX 0x000C206c /* Reset: CORER */
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT 0
+#define I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEPBLMAX_PMPEPBLMAX_SHIFT)
+#define I40E_GLHMC_PEPFFIRSTSD 0x000C20E4 /* Reset: CORER */
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT 0
+#define I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_MASK I40E_MASK(0xFFF, I40E_GLHMC_PEPFFIRSTSD_GLHMC_PEPFFIRSTSD_SHIFT)
+#define I40E_GLHMC_PEQ1BASE(_i) (0x000C5200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1BASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_PEQ1CNT(_i) (0x000C5300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1CNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_PEQ1FLBASE(_i) (0x000C5400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_PEQ1FLMAX 0x000C2058 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT 0
+#define I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1FLMAX_PMPEQ1FLMAX_SHIFT)
+#define I40E_GLHMC_PEQ1MAX 0x000C2054 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT 0
+#define I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEQ1MAX_PMPEQ1MAX_SHIFT)
+#define I40E_GLHMC_PEQ1OBJSZ 0x000C2050 /* Reset: CORER */
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQ1OBJSZ_PMPEQ1OBJSZ_SHIFT)
+#define I40E_GLHMC_PEQPBASE(_i) (0x000C4000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_PEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_PEQPCNT(_i) (0x000C4100 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEQPCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_PEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_PEQPOBJSZ 0x000C201c /* Reset: CORER */
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEQPOBJSZ_PMPEQPOBJSZ_SHIFT)
+#define I40E_GLHMC_PESRQBASE(_i) (0x000C4400 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQBASE_MAX_INDEX 15
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_PESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_PESRQCNT(_i) (0x000C4500 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PESRQCNT_MAX_INDEX 15
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_PESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_PESRQMAX 0x000C2028 /* Reset: CORER */
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT 0
+#define I40E_GLHMC_PESRQMAX_PMPESRQMAX_MASK I40E_MASK(0xFFFF, I40E_GLHMC_PESRQMAX_PMPESRQMAX_SHIFT)
+#define I40E_GLHMC_PESRQOBJSZ 0x000C2024 /* Reset: CORER */
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT 0
+#define I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PESRQOBJSZ_PMPESRQOBJSZ_SHIFT)
+#define I40E_GLHMC_PETIMERBASE(_i) (0x000C5A00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERBASE_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_PETIMERCNT(_i) (0x000C5B00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PETIMERCNT_MAX_INDEX 15
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_PETIMERMAX 0x000C2084 /* Reset: CORER */
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT 0
+#define I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PETIMERMAX_PMPETIMERMAX_SHIFT)
+#define I40E_GLHMC_PETIMEROBJSZ 0x000C2080 /* Reset: CORER */
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT 0
+#define I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PETIMEROBJSZ_PMPETIMEROBJSZ_SHIFT)
+#define I40E_GLHMC_PEXFBASE(_i) (0x000C4e00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_PEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_PEXFCNT(_i) (0x000C4f00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFCNT_MAX_INDEX 15
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_PEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_PEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_PEXFFLBASE(_i) (0x000C5000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLBASE_MAX_INDEX 15
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_PEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_PEXFFLMAX 0x000C204c /* Reset: CORER */
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT 0
+#define I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_MASK I40E_MASK(0x1FFFFFF, I40E_GLHMC_PEXFFLMAX_PMPEXFFLMAX_SHIFT)
+#define I40E_GLHMC_PEXFMAX 0x000C2048 /* Reset: CORER */
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT 0
+#define I40E_GLHMC_PEXFMAX_PMPEXFMAX_MASK I40E_MASK(0x3FFFFFF, I40E_GLHMC_PEXFMAX_PMPEXFMAX_SHIFT)
+#define I40E_GLHMC_PEXFOBJSZ 0x000C2044 /* Reset: CORER */
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT 0
+#define I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_MASK I40E_MASK(0xF, I40E_GLHMC_PEXFOBJSZ_PMPEXFOBJSZ_SHIFT)
+#define I40E_GLHMC_PFPESDPART(_i) (0x000C0880 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLHMC_PFPESDPART_MAX_INDEX 15
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_PFPESDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_PFPESDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_PFPESDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_PFPESDPART_PMSDSIZE_SHIFT)
+#define I40E_GLHMC_VFAPBVTINUSEBASE(_i) (0x000Cca00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFAPBVTINUSEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT 0
+#define I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFAPBVTINUSEBASE_FPMAPBINUSEBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART(_i) (0x00132240 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFCEQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT 0
+#define I40E_GLHMC_VFCEQPART_PMCEQBASE_MASK I40E_MASK(0xFF, I40E_GLHMC_VFCEQPART_PMCEQBASE_SHIFT)
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT 16
+#define I40E_GLHMC_VFCEQPART_PMCEQSIZE_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFCEQPART_PMCEQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART(_i) (0x00132140 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBCQPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT 0
+#define I40E_GLHMC_VFDBCQPART_PMDBCQBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBCQPART_PMDBCQBASE_SHIFT)
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBCQPART_PMDBCQSIZE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART(_i) (0x00138E00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFDBQPPART_MAX_INDEX 31
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT 0
+#define I40E_GLHMC_VFDBQPPART_PMDBQPBASE_MASK I40E_MASK(0x3FFF, I40E_GLHMC_VFDBQPPART_PMDBQPBASE_SHIFT)
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT 16
+#define I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_MASK I40E_MASK(0x7FFF, I40E_GLHMC_VFDBQPPART_PMDBQPSIZE_SHIFT)
+#define I40E_GLHMC_VFFSIAVBASE(_i) (0x000Cd600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT 0
+#define I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFFSIAVBASE_FPMFSIAVBASE_SHIFT)
+#define I40E_GLHMC_VFFSIAVCNT(_i) (0x000Cd700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFFSIAVCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT 0
+#define I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFFSIAVCNT_FPMFSIAVCNT_SHIFT)
+#define I40E_GLHMC_VFPDINV(_i) (0x000C8300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPDINV_MAX_INDEX 31
+#define I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT 0
+#define I40E_GLHMC_VFPDINV_PMSDIDX_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFPDINV_PMSDIDX_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_GLHMC_VFPDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_GLHMC_VFPDINV_PMSDPARTSEL_SHIFT)
+#define I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT 16
+#define I40E_GLHMC_VFPDINV_PMPDIDX_MASK I40E_MASK(0x1FF, I40E_GLHMC_VFPDINV_PMPDIDX_SHIFT)
+#define I40E_GLHMC_VFPEARPBASE(_i) (0x000Cc800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEARPBASE_FPMPEARPBASE_SHIFT)
+#define I40E_GLHMC_VFPEARPCNT(_i) (0x000Cc900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEARPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEARPCNT_FPMPEARPCNT_SHIFT)
+#define I40E_GLHMC_VFPECQBASE(_i) (0x000Cc200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT 0
+#define I40E_GLHMC_VFPECQBASE_FPMPECQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPECQBASE_FPMPECQBASE_SHIFT)
+#define I40E_GLHMC_VFPECQCNT(_i) (0x000Cc300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPECQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT 0
+#define I40E_GLHMC_VFPECQCNT_FPMPECQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPECQCNT_FPMPECQCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTCNT(_i) (0x000Cc700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT 0
+#define I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEHTCNT_FPMPEHTCNT_SHIFT)
+#define I40E_GLHMC_VFPEHTEBASE(_i) (0x000Cc600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEHTEBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT 0
+#define I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEHTEBASE_FPMPEHTEBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRBASE(_i) (0x000Ccc00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT 0
+#define I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEMRBASE_FPMPEMRBASE_SHIFT)
+#define I40E_GLHMC_VFPEMRCNT(_i) (0x000Ccd00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEMRCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT 0
+#define I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEMRCNT_FPMPEMRSZ_SHIFT)
+#define I40E_GLHMC_VFPEPBLBASE(_i) (0x000Cd800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEPBLBASE_FPMPEPBLBASE_SHIFT)
+#define I40E_GLHMC_VFPEPBLCNT(_i) (0x000Cd900 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEPBLCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT 0
+#define I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEPBLCNT_FPMPEPBLCNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1BASE(_i) (0x000Cd200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1BASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1BASE_FPMPEQ1BASE_SHIFT)
+#define I40E_GLHMC_VFPEQ1CNT(_i) (0x000Cd300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1CNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT 0
+#define I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQ1CNT_FPMPEQ1CNT_SHIFT)
+#define I40E_GLHMC_VFPEQ1FLBASE(_i) (0x000Cd400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQ1FLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQ1FLBASE_FPMPEQ1FLBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPBASE(_i) (0x000Cc000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT 0
+#define I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEQPBASE_FPMPEQPBASE_SHIFT)
+#define I40E_GLHMC_VFPEQPCNT(_i) (0x000Cc100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEQPCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT 0
+#define I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEQPCNT_FPMPEQPCNT_SHIFT)
+#define I40E_GLHMC_VFPESRQBASE(_i) (0x000Cc400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT 0
+#define I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPESRQBASE_FPMPESRQBASE_SHIFT)
+#define I40E_GLHMC_VFPESRQCNT(_i) (0x000Cc500 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPESRQCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT 0
+#define I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPESRQCNT_FPMPESRQCNT_SHIFT)
+#define I40E_GLHMC_VFPETIMERBASE(_i) (0x000CDA00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT 0
+#define I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPETIMERBASE_FPMPETIMERBASE_SHIFT)
+#define I40E_GLHMC_VFPETIMERCNT(_i) (0x000CDB00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPETIMERCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT 0
+#define I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPETIMERCNT_FPMPETIMERCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFBASE(_i) (0x000Cce00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFBASE_FPMPEXFBASE_SHIFT)
+#define I40E_GLHMC_VFPEXFCNT(_i) (0x000Ccf00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFCNT_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT 0
+#define I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_MASK I40E_MASK(0x1FFFFFFF, I40E_GLHMC_VFPEXFCNT_FPMPEXFCNT_SHIFT)
+#define I40E_GLHMC_VFPEXFFLBASE(_i) (0x000Cd000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFPEXFFLBASE_MAX_INDEX 31
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT 0
+#define I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_MASK I40E_MASK(0xFFFFFF, I40E_GLHMC_VFPEXFFLBASE_FPMPEXFFLBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART(_i) (0x000C8800 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLHMC_VFSDPART_MAX_INDEX 31
+#define I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT 0
+#define I40E_GLHMC_VFSDPART_PMSDBASE_MASK I40E_MASK(0xFFF, I40E_GLHMC_VFSDPART_PMSDBASE_SHIFT)
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT 16
+#define I40E_GLHMC_VFSDPART_PMSDSIZE_MASK I40E_MASK(0x1FFF, I40E_GLHMC_VFSDPART_PMSDSIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE 0x000A80BC /* Reset: CORER */
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPBLOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPBLOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPBLOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPBLOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPBLOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPBLOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE 0x000D0088 /* Reset: CORER */
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPDOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPDOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPDOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPDOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPDOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPDOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPDOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE 0x000A60E8 /* Reset: CORER */
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT 0
+#define I40E_GLPEOC_CACHESIZE_WORD_SIZE_MASK I40E_MASK(0xFF, I40E_GLPEOC_CACHESIZE_WORD_SIZE_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_SETS_SHIFT 8
+#define I40E_GLPEOC_CACHESIZE_SETS_MASK I40E_MASK(0xFFF, I40E_GLPEOC_CACHESIZE_SETS_SHIFT)
+#define I40E_GLPEOC_CACHESIZE_WAYS_SHIFT 20
+#define I40E_GLPEOC_CACHESIZE_WAYS_MASK I40E_MASK(0xF, I40E_GLPEOC_CACHESIZE_WAYS_SHIFT)
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_PDINV_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_PDINV_PMSDPARTSEL_SHIFT)
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT 15
+#define I40E_PFHMC_SDCMD_PMSDPARTSEL_MASK I40E_MASK(0x1, I40E_PFHMC_SDCMD_PMSDPARTSEL_SHIFT)
+#define I40E_GL_PPRS_SPARE 0x000856E0 /* Reset: CORER */
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT 0
+#define I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_PPRS_SPARE_GL_PPRS_SPARE_SHIFT)
+#define I40E_GL_TLAN_SPARE 0x000E64E0 /* Reset: CORER */
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT 0
+#define I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TLAN_SPARE_GL_TLAN_SPARE_SHIFT)
+#define I40E_GL_TUPM_SPARE 0x000a2230 /* Reset: CORER */
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT 0
+#define I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GL_TUPM_SPARE_GL_TUPM_SPARE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG 0x000B81C0 /* Reset: POR */
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT 0
+#define I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_UPPER_CORE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT 1
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_HIU_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT 2
+#define I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PE_CLK_EN_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT 3
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_PRIM_CLK_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT 4
+#define I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_PE_ACTIVE_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT 5
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_PRST_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT 6
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_SCLR_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT 7
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT 8
+#define I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_PCIE_RAW_IMIB_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT 9
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_EMP_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT 10
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_GLOBAL_RESET_N_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT 11
+#define I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CAR_RAW_LAN_POWER_GOOD_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT 12
+#define I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_CDC_IOSF_PRIMERY_RST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT 13
+#define I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_GBE_GLOBALRST_B_SHIFT)
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT 14
+#define I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLGEN_CAR_DEBUG_FLEEP_AL_GLOBR_DONE_SHIFT)
+#define I40E_GLGEN_MISC_SPARE 0x000880E0 /* Reset: POR */
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT 0
+#define I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_MASK I40E_MASK(0xFFFFFFFF, I40E_GLGEN_MISC_SPARE_GLGEN_MISC_SPARE_SHIFT)
+#define I40E_GL_UFUSE_SOC 0x000BE550 /* Reset: POR */
+#define I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT 0
+#define I40E_GL_UFUSE_SOC_PORT_MODE_MASK I40E_MASK(0x3, I40E_GL_UFUSE_SOC_PORT_MODE_SHIFT)
+#define I40E_GL_UFUSE_SOC_NIC_ID_SHIFT 2
+#define I40E_GL_UFUSE_SOC_NIC_ID_MASK I40E_MASK(0x1, I40E_GL_UFUSE_SOC_NIC_ID_SHIFT)
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT 3
+#define I40E_GL_UFUSE_SOC_SPARE_FUSES_MASK I40E_MASK(0x1FFF, I40E_GL_UFUSE_SOC_SPARE_FUSES_SHIFT)
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_PFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_PFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL0_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL0_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN_WB_ON_ITR_SHIFT)
+#define I40E_VPLAN_QBASE(_VF) (0x00074800 + ((_VF) * 4)) /* _i=0...127 */ /* Reset: VFR */
+#define I40E_VPLAN_QBASE_MAX_INDEX 127
+#define I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT 0
+#define I40E_VPLAN_QBASE_VFFIRSTQ_MASK I40E_MASK(0x7FF, I40E_VPLAN_QBASE_VFFIRSTQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFNUMQ_SHIFT 11
+#define I40E_VPLAN_QBASE_VFNUMQ_MASK I40E_MASK(0xFF, I40E_VPLAN_QBASE_VFNUMQ_SHIFT)
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT 31
+#define I40E_VPLAN_QBASE_VFQTABLE_ENA_MASK I40E_MASK(0x1, I40E_VPLAN_QBASE_VFQTABLE_ENA_SHIFT)
+#define I40E_PRTMAC_LINK_DOWN_COUNTER 0x001E2440 /* Reset: GLOBR */
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT 0
+#define I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_MASK I40E_MASK(0xFFFF, I40E_PRTMAC_LINK_DOWN_COUNTER_LINK_DOWN_COUNTER_SHIFT)
+#define I40E_GLNVM_AL_REQ 0x000B6164 /* Reset: POR */
+#define I40E_GLNVM_AL_REQ_POR_SHIFT 0
+#define I40E_GLNVM_AL_REQ_POR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_POR_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT 1
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_SHIFT)
+#define I40E_GLNVM_AL_REQ_GLOBR_SHIFT 2
+#define I40E_GLNVM_AL_REQ_GLOBR_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_GLOBR_SHIFT)
+#define I40E_GLNVM_AL_REQ_CORER_SHIFT 3
+#define I40E_GLNVM_AL_REQ_CORER_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_CORER_SHIFT)
+#define I40E_GLNVM_AL_REQ_PE_SHIFT 4
+#define I40E_GLNVM_AL_REQ_PE_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PE_SHIFT)
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT 5
+#define I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_MASK I40E_MASK(0x1, I40E_GLNVM_AL_REQ_PCIE_IMIB_ASSERT_SHIFT)
+#define I40E_GLNVM_ALTIMERS 0x000B6140 /* Reset: POR */
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT 0
+#define I40E_GLNVM_ALTIMERS_PCI_ALTIMER_MASK I40E_MASK(0xFFF, I40E_GLNVM_ALTIMERS_PCI_ALTIMER_SHIFT)
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT 12
+#define I40E_GLNVM_ALTIMERS_GEN_ALTIMER_MASK I40E_MASK(0xFFFFF, I40E_GLNVM_ALTIMERS_GEN_ALTIMER_SHIFT)
+#define I40E_GLNVM_FLA 0x000B6108 /* Reset: POR */
+#define I40E_GLNVM_FLA_LOCKED_SHIFT 6
+#define I40E_GLNVM_FLA_LOCKED_MASK I40E_MASK(0x1, I40E_GLNVM_FLA_LOCKED_SHIFT)
+
+#define I40E_GLNVM_ULD 0x000B6008 /* Reset: POR */
+#define I40E_GLNVM_ULD_PCIER_DONE_SHIFT 0
+#define I40E_GLNVM_ULD_PCIER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT 1
+#define I40E_GLNVM_ULD_PCIER_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_CORER_DONE_SHIFT 3
+#define I40E_GLNVM_ULD_CORER_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_CORER_DONE_SHIFT)
+#define I40E_GLNVM_ULD_GLOBR_DONE_SHIFT 4
+#define I40E_GLNVM_ULD_GLOBR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_GLOBR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_SHIFT 5
+#define I40E_GLNVM_ULD_POR_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_SHIFT)
+#define I40E_GLNVM_ULD_POR_DONE_1_SHIFT 8
+#define I40E_GLNVM_ULD_POR_DONE_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_POR_DONE_1_SHIFT)
+#define I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT 9
+#define I40E_GLNVM_ULD_PCIER_DONE_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PCIER_DONE_2_SHIFT)
+#define I40E_GLNVM_ULD_PE_DONE_SHIFT 10
+#define I40E_GLNVM_ULD_PE_DONE_MASK I40E_MASK(0x1, I40E_GLNVM_ULD_PE_DONE_SHIFT)
+#define I40E_GLNVM_ULT 0x000B6154 /* Reset: POR */
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT 0
+#define I40E_GLNVM_ULT_CONF_PCIR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIR_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT 1
+#define I40E_GLNVM_ULT_CONF_PCIRTL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIRTL_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_1_SHIFT 2
+#define I40E_GLNVM_ULT_RESERVED_1_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_1_SHIFT)
+#define I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT 3
+#define I40E_GLNVM_ULT_CONF_CORE_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_CORE_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT 4
+#define I40E_GLNVM_ULT_CONF_GLOBAL_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_GLOBAL_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_POR_AE_SHIFT 5
+#define I40E_GLNVM_ULT_CONF_POR_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_POR_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_2_SHIFT 6
+#define I40E_GLNVM_ULT_RESERVED_2_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_2_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_3_SHIFT 7
+#define I40E_GLNVM_ULT_RESERVED_3_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_RESERVED_3_SHIFT)
+#define I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT 8
+#define I40E_GLNVM_ULT_CONF_EMP_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_EMP_AE_SHIFT)
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT 9
+#define I40E_GLNVM_ULT_CONF_PCIALT_AE_MASK I40E_MASK(0x1, I40E_GLNVM_ULT_CONF_PCIALT_AE_SHIFT)
+#define I40E_GLNVM_ULT_RESERVED_4_SHIFT 10
+#define I40E_GLNVM_ULT_RESERVED_4_MASK I40E_MASK(0x3FFFFF, I40E_GLNVM_ULT_RESERVED_4_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT 0x000B615C /* Reset: POR */
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT 0
+#define I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_CMLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT 1
+#define I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PMAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT 2
+#define I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT 3
+#define I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT 4
+#define I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT 5
+#define I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RLAN_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT 6
+#define I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RDPU_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT 7
+#define I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_PPRS_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT 8
+#define I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT 9
+#define I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TPB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT 10
+#define I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_FOC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT 11
+#define I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TSCD_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT 12
+#define I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_TCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT 13
+#define I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_RCB_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT 14
+#define I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_WUC_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT 15
+#define I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_STAT_MEM_INIT_DONE_SHIFT)
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT 16
+#define I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_MASK I40E_MASK(0x1, I40E_MEM_INIT_DONE_STAT_ITR_MEM_INIT_DONE_SHIFT)
+#define I40E_MNGSB_DADD 0x000B7030 /* Reset: POR */
+#define I40E_MNGSB_DADD_ADDR_SHIFT 0
+#define I40E_MNGSB_DADD_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DADD_ADDR_SHIFT)
+#define I40E_MNGSB_DCNT 0x000B7034 /* Reset: POR */
+#define I40E_MNGSB_DCNT_BYTE_CNT_SHIFT 0
+#define I40E_MNGSB_DCNT_BYTE_CNT_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_DCNT_BYTE_CNT_SHIFT)
+#define I40E_MNGSB_MSGCTL 0x000B7020 /* Reset: POR */
+#define I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT 0
+#define I40E_MNGSB_MSGCTL_HDR_DWS_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_HDR_DWS_SHIFT)
+#define I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT 8
+#define I40E_MNGSB_MSGCTL_EXP_RDW_MASK I40E_MASK(0x1FF, I40E_MNGSB_MSGCTL_EXP_RDW_SHIFT)
+#define I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT 26
+#define I40E_MNGSB_MSGCTL_MSG_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_MSG_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT 28
+#define I40E_MNGSB_MSGCTL_TOKEN_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_MSGCTL_TOKEN_MODE_SHIFT)
+#define I40E_MNGSB_MSGCTL_BARCLR_SHIFT 30
+#define I40E_MNGSB_MSGCTL_BARCLR_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_BARCLR_SHIFT)
+#define I40E_MNGSB_MSGCTL_CMDV_SHIFT 31
+#define I40E_MNGSB_MSGCTL_CMDV_MASK I40E_MASK(0x1, I40E_MNGSB_MSGCTL_CMDV_SHIFT)
+#define I40E_MNGSB_RDATA 0x000B7300 /* Reset: POR */
+#define I40E_MNGSB_RDATA_DATA_SHIFT 0
+#define I40E_MNGSB_RDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_RDATA_DATA_SHIFT)
+#define I40E_MNGSB_RHDR0 0x000B72FC /* Reset: POR */
+#define I40E_MNGSB_RHDR0_DESTINATION_SHIFT 0
+#define I40E_MNGSB_RHDR0_DESTINATION_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_DESTINATION_SHIFT)
+#define I40E_MNGSB_RHDR0_SOURCE_SHIFT 8
+#define I40E_MNGSB_RHDR0_SOURCE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_SOURCE_SHIFT)
+#define I40E_MNGSB_RHDR0_OPCODE_SHIFT 16
+#define I40E_MNGSB_RHDR0_OPCODE_MASK I40E_MASK(0xFF, I40E_MNGSB_RHDR0_OPCODE_SHIFT)
+#define I40E_MNGSB_RHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_RHDR0_TAG_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_TAG_SHIFT)
+#define I40E_MNGSB_RHDR0_RESPONSE_SHIFT 27
+#define I40E_MNGSB_RHDR0_RESPONSE_MASK I40E_MASK(0x7, I40E_MNGSB_RHDR0_RESPONSE_SHIFT)
+#define I40E_MNGSB_RHDR0_EH_SHIFT 31
+#define I40E_MNGSB_RHDR0_EH_MASK I40E_MASK(0x1, I40E_MNGSB_RHDR0_EH_SHIFT)
+#define I40E_MNGSB_RSPCTL 0x000B7024 /* Reset: POR */
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT 0
+#define I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_MASK I40E_MASK(0x1FF, I40E_MNGSB_RSPCTL_DMA_MSG_DWORDS_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT 26
+#define I40E_MNGSB_RSPCTL_RSP_MODE_MASK I40E_MASK(0x3, I40E_MNGSB_RSPCTL_RSP_MODE_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT 30
+#define I40E_MNGSB_RSPCTL_RSP_BAD_LEN_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_BAD_LEN_SHIFT)
+#define I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT 31
+#define I40E_MNGSB_RSPCTL_RSP_ERR_MASK I40E_MASK(0x1, I40E_MNGSB_RSPCTL_RSP_ERR_SHIFT)
+#define I40E_MNGSB_WDATA 0x000B7100 /* Reset: POR */
+#define I40E_MNGSB_WDATA_DATA_SHIFT 0
+#define I40E_MNGSB_WDATA_DATA_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WDATA_DATA_SHIFT)
+#define I40E_MNGSB_WHDR0 0x000B70F4 /* Reset: POR */
+#define I40E_MNGSB_WHDR0_RAW_DEST_SHIFT 0
+#define I40E_MNGSB_WHDR0_RAW_DEST_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_RAW_DEST_SHIFT)
+#define I40E_MNGSB_WHDR0_DEST_SEL_SHIFT 12
+#define I40E_MNGSB_WHDR0_DEST_SEL_MASK I40E_MASK(0xF, I40E_MNGSB_WHDR0_DEST_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT 16
+#define I40E_MNGSB_WHDR0_OPCODE_SEL_MASK I40E_MASK(0xFF, I40E_MNGSB_WHDR0_OPCODE_SEL_SHIFT)
+#define I40E_MNGSB_WHDR0_TAG_SHIFT 24
+#define I40E_MNGSB_WHDR0_TAG_MASK I40E_MASK(0x7F, I40E_MNGSB_WHDR0_TAG_SHIFT)
+#define I40E_MNGSB_WHDR1 0x000B70F8 /* Reset: POR */
+#define I40E_MNGSB_WHDR1_ADDR_SHIFT 0
+#define I40E_MNGSB_WHDR1_ADDR_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR1_ADDR_SHIFT)
+#define I40E_MNGSB_WHDR2 0x000B70FC /* Reset: POR */
+#define I40E_MNGSB_WHDR2_LENGTH_SHIFT 0
+#define I40E_MNGSB_WHDR2_LENGTH_MASK I40E_MASK(0xFFFFFFFF, I40E_MNGSB_WHDR2_LENGTH_SHIFT)
+
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT 21
+#define I40E_GLPCI_CAPSUP_WAKUP_EN_MASK I40E_MASK(0x1, I40E_GLPCI_CAPSUP_WAKUP_EN_SHIFT)
+
+#define I40E_GLPCI_CUR_CLNT_COMMON 0x0009CA18 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_COMMON_OSR_SHIFT)
+#define I40E_GLPCI_CUR_CLNT_PIPEMON 0x0009CA20 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD 0x0009c514 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD 0x0009c594 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_MNG_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_MNG_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD 0x0009c510 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD 0x0009c590 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_PMAT_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_PMAT_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD 0x0009c500 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD 0x0009c580 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD 0x0009c508 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD 0x0009c588 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_RXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_RXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD 0x0009c518 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD 0x0009c598 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TDPU_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TDPU_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD 0x0009c504 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD 0x0009c584 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TLAN_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TLAN_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD 0x0009c50C /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD 0x0009c58c /* Reset: PCIR */
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_TXPE_RSVD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_TXPE_RSVD_OSR_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON 0x0009CA28 /* Reset: PCIR */
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT 16
+#define I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_CUR_WATMK_CLNT_COMMON_OSR_SHIFT)
+
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT 4
+#define I40E_GLPCI_LBARCTRL_PE_DB_SIZE_MASK I40E_MASK(0x3, I40E_GLPCI_LBARCTRL_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT 10
+#define I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_MASK I40E_MASK(0x1, I40E_GLPCI_LBARCTRL_VF_PE_DB_SIZE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG 0x0009CA00 /* Reset: PCIR */
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT 0
+#define I40E_GLPCI_NPQ_CFG_EXTEND_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_EXTEND_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT 1
+#define I40E_GLPCI_NPQ_CFG_SMALL_TO_MASK I40E_MASK(0x1, I40E_GLPCI_NPQ_CFG_SMALL_TO_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT 2
+#define I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_WEIGHT_AVG_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT 6
+#define I40E_GLPCI_NPQ_CFG_NPQ_SPARE_MASK I40E_MASK(0x3FF, I40E_GLPCI_NPQ_CFG_NPQ_SPARE_SHIFT)
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT 16
+#define I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_MASK I40E_MASK(0xF, I40E_GLPCI_NPQ_CFG_NPQ_ERR_STAT_SHIFT)
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON 0x0009CA30 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_CLNT_PIPEMON_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD 0x0009CB14 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_MNG_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_MNG_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD 0x0009CB10 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_PMAT_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_PMAT_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD 0x0009CB00 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD 0x0009CB08 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_RXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_RXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD 0x0009CB04 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TLAN_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TLAN_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD 0x0009CB18 /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TPDU_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TPDU_ALWD_OSR_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD 0x0009CB0c /* Reset: PCIR */
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT 0
+#define I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_DATA_LINES_SHIFT)
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT 16
+#define I40E_GLPCI_WATMK_TXPE_ALWD_OSR_MASK I40E_MASK(0xFFFF, I40E_GLPCI_WATMK_TXPE_ALWD_OSR_SHIFT)
+#define I40E_GLPE_CPUSTATUS0 0x0000D040 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT 0
+#define I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS0_PECPUSTATUS0_SHIFT)
+#define I40E_GLPE_CPUSTATUS1 0x0000D044 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT 0
+#define I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS1_PECPUSTATUS1_SHIFT)
+#define I40E_GLPE_CPUSTATUS2 0x0000D048 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT 0
+#define I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPE_CPUSTATUS2_PECPUSTATUS2_SHIFT)
+#define I40E_GLPE_CPUTRIG0 0x0000D060 /* Reset: PE_CORER */
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT 0
+#define I40E_GLPE_CPUTRIG0_PECPUTRIG0_MASK I40E_MASK(0xFFFF, I40E_GLPE_CPUTRIG0_PECPUTRIG0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT 17
+#define I40E_GLPE_CPUTRIG0_TEPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_TEPREQUEST0_SHIFT)
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT 18
+#define I40E_GLPE_CPUTRIG0_OOPREQUEST0_MASK I40E_MASK(0x1, I40E_GLPE_CPUTRIG0_OOPREQUEST0_SHIFT)
+#define I40E_GLPE_DUAL40_RUPM 0x0000DA04 /* Reset: PE_CORER */
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT 0
+#define I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_MASK I40E_MASK(0x1, I40E_GLPE_DUAL40_RUPM_DUAL_40G_MODE_SHIFT)
+#define I40E_GLPE_PFAEQEDROPCNT(_i) (0x00131440 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFAEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCEQEDROPCNT(_i) (0x001313C0 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCEQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_PFCQEDROPCNT(_i) (0x00131340 + ((_i) * 4)) /* _i=0...15 */ /* Reset: CORER */
+#define I40E_GLPE_PFCQEDROPCNT_MAX_INDEX 15
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_PFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_RUPM_CQPPOOL 0x0000DACC /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_CQPPOOL_CQPSPADS_SHIFT)
+#define I40E_GLPE_RUPM_FLRPOOL 0x0000DAC4 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_FLRPOOL_FLRSPADS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL 0x0000DA00 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT 0
+#define I40E_GLPE_RUPM_GCTL_ALLOFFTH_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_GCTL_ALLOFFTH_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT 26
+#define I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P0_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT 27
+#define I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P1_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT 28
+#define I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P2_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT 29
+#define I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_P3_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT 30
+#define I40E_GLPE_RUPM_GCTL_RUPM_DIS_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_RUPM_DIS_SHIFT)
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT 31
+#define I40E_GLPE_RUPM_GCTL_SWLB_MODE_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_GCTL_SWLB_MODE_SHIFT)
+#define I40E_GLPE_RUPM_PTXPOOL 0x0000DAC8 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PTXPOOL_PTXSPADS_SHIFT)
+#define I40E_GLPE_RUPM_PUSHPOOL 0x0000DAC0 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT 0
+#define I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_MASK I40E_MASK(0xFF, I40E_GLPE_RUPM_PUSHPOOL_PUSHSPADS_SHIFT)
+#define I40E_GLPE_RUPM_TXHOST_EN 0x0000DA08 /* Reset: PE_CORER */
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT 0
+#define I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_MASK I40E_MASK(0x1, I40E_GLPE_RUPM_TXHOST_EN_TXHOST_EN_SHIFT)
+#define I40E_GLPE_VFAEQEDROPCNT(_i) (0x00132540 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFAEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFAEQEDROPCNT_AEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCEQEDROPCNT(_i) (0x00132440 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCEQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCEQEDROPCNT_CEQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFCQEDROPCNT(_i) (0x00132340 + ((_i) * 4)) /* _i=0...31 */ /* Reset: CORER */
+#define I40E_GLPE_VFCQEDROPCNT_MAX_INDEX 31
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT 0
+#define I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFCQEDROPCNT_CQEDROPCNT_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL(_i) (0x0000D400 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMOBJCTRL_MAX_INDEX 31
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT 0
+#define I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_XMIT_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT 8
+#define I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_MASK I40E_MASK(0x7, I40E_GLPE_VFFLMOBJCTRL_Q1_BLOCKSIZE_SHIFT)
+#define I40E_GLPE_VFFLMQ1ALLOCERR(_i) (0x0000C700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMQ1ALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFFLMXMITALLOCERR(_i) (0x0000C600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFFLMXMITALLOCERR_MAX_INDEX 31
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_GLPE_VFFLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_GLPE_VFUDACTRL(_i) (0x0000C000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDACTRL_MAX_INDEX 31
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_GLPE_VFUDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN(_i) (0x0000C100 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPE_VFUDAUCFBQPN_MAX_INDEX 31
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT 0
+#define I40E_GLPE_VFUDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_GLPE_VFUDAUCFBQPN_QPN_SHIFT)
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT 31
+#define I40E_GLPE_VFUDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_GLPE_VFUDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_AEQALLOC 0x00131180 /* Reset: PFR */
+#define I40E_PFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_PFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_PFPE_CCQPHIGH 0x00008200 /* Reset: PFR */
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_PFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_PFPE_CCQPLOW 0x00008180 /* Reset: PFR */
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_PFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_PFPE_CCQPSTATUS 0x00008100 /* Reset: PFR */
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_PFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_PFPE_CQACK 0x00131100 /* Reset: PFR */
+#define I40E_PFPE_CQACK_PECQID_SHIFT 0
+#define I40E_PFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQACK_PECQID_SHIFT)
+#define I40E_PFPE_CQARM 0x00131080 /* Reset: PFR */
+#define I40E_PFPE_CQARM_PECQID_SHIFT 0
+#define I40E_PFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_PFPE_CQARM_PECQID_SHIFT)
+#define I40E_PFPE_CQPDB 0x00008000 /* Reset: PFR */
+#define I40E_PFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_PFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_PFPE_CQPERRCODES 0x00008880 /* Reset: PFR */
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_PFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_PFPE_CQPTAIL 0x00008080 /* Reset: PFR */
+#define I40E_PFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_PFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_PFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_PFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_PFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_PFPE_FLMQ1ALLOCERR 0x00008980 /* Reset: PFR */
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMQ1ALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_FLMXMITALLOCERR 0x00008900 /* Reset: PFR */
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT 0
+#define I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_MASK I40E_MASK(0xFFFF, I40E_PFPE_FLMXMITALLOCERR_ERROR_COUNT_SHIFT)
+#define I40E_PFPE_IPCONFIG0 0x00008280 /* Reset: PFR */
+#define I40E_PFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_PFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_PFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_PFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_PFPE_MRTEIDXMASK 0x00008600 /* Reset: PFR */
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_PFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_PFPE_RCVUNEXPECTEDERROR 0x00008680 /* Reset: PFR */
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_PFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_PFPE_TCPNOWTIMER 0x00008580 /* Reset: PFR */
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_PFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_PFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_PFPE_UDACTRL 0x00008700 /* Reset: PFR */
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT 0
+#define I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT 1
+#define I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV4UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT 2
+#define I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6MCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT 3
+#define I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_IPV6UCFRAGRESBP_SHIFT)
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT 4
+#define I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_MASK I40E_MASK(0x1, I40E_PFPE_UDACTRL_UDPMCFRAGRESFAIL_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN 0x00008780 /* Reset: PFR */
+#define I40E_PFPE_UDAUCFBQPN_QPN_SHIFT 0
+#define I40E_PFPE_UDAUCFBQPN_QPN_MASK I40E_MASK(0x3FFFF, I40E_PFPE_UDAUCFBQPN_QPN_SHIFT)
+#define I40E_PFPE_UDAUCFBQPN_VALID_SHIFT 31
+#define I40E_PFPE_UDAUCFBQPN_VALID_MASK I40E_MASK(0x1, I40E_PFPE_UDAUCFBQPN_VALID_SHIFT)
+#define I40E_PFPE_WQEALLOC 0x00138C00 /* Reset: PFR */
+#define I40E_PFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_PFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_PFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_PFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
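Every register definition in this block follows one pattern: a *_SHIFT constant gives the field's bit position and the matching *_MASK is already shifted into place by I40E_MASK, so a field is extracted by AND-ing with the mask and shifting down. Below is a minimal stand-alone sketch using the I40E_PFPE_CCQPSTATUS fields defined above; the local I40E_MASK and read_reg helpers are assumptions for illustration only, not part of this diff:

#include <stdint.h>
#include <stdio.h>

/* Assumed to mirror the header's helper: pre-shift the field mask into place. */
#define I40E_MASK(mask, shift) ((uint32_t)(mask) << (shift))

#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
#define I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
#define I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)

/* Hypothetical MMIO read; a real driver would use its own register accessor. */
static uint32_t read_reg(uint32_t offset) { (void)offset; return 0x31; }

int main(void)
{
    uint32_t v = read_reg(0x00008100);  /* I40E_PFPE_CCQPSTATUS */
    uint32_t done = (v & I40E_PFPE_CCQPSTATUS_CCQP_DONE_MASK) >>
        I40E_PFPE_CCQPSTATUS_CCQP_DONE_SHIFT;
    uint32_t prof = (v & I40E_PFPE_CCQPSTATUS_HMC_PROFILE_MASK) >>
        I40E_PFPE_CCQPSTATUS_HMC_PROFILE_SHIFT;
    printf("CCQP_DONE=%u HMC_PROFILE=%u\n", done, prof);
    return 0;
}

Because the masks are pre-shifted, a bare test such as (v & I40E_PFPE_CCQPSTATUS_CCQP_ERR_MASK) also works with no extra shifting when only a set/clear check is needed.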
+#define I40E_PRTDCB_RLPMC 0x0001F140 /* Reset: PE_CORER */
+#define I40E_PRTDCB_RLPMC_TC2PFC_SHIFT 0
+#define I40E_PRTDCB_RLPMC_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTDCB_RLPMC_TC2PFC_SHIFT)
+#define I40E_PRTDCB_TCMSTC_RLPM(_i) (0x0001F040 + ((_i) * 32)) /* _i=0...7 */ /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCMSTC_RLPM_MAX_INDEX 7
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT 0
+#define I40E_PRTDCB_TCMSTC_RLPM_MSTC_MASK I40E_MASK(0xFFFFF, I40E_PRTDCB_TCMSTC_RLPM_MSTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM 0x0001F1A0 /* Reset: PE_CORER */
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT 0
+#define I40E_PRTDCB_TCPMC_RLPM_CPM_MASK I40E_MASK(0x1FFF, I40E_PRTDCB_TCPMC_RLPM_CPM_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT 13
+#define I40E_PRTDCB_TCPMC_RLPM_LLTC_MASK I40E_MASK(0xFF, I40E_PRTDCB_TCPMC_RLPM_LLTC_SHIFT)
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT 30
+#define I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_MASK I40E_MASK(0x1, I40E_PRTDCB_TCPMC_RLPM_TCPM_MODE_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03 0x0000DAE0 /* Reset: PE_CORER */
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTE_RUPM_TCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CNTR 0x0000DB20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_CNTR_COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CNTR_COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_CTL 0x0000DA40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_CTL_LLTC_SHIFT 13
+#define I40E_PRTPE_RUPM_CTL_LLTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_CTL_LLTC_SHIFT)
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT 30
+#define I40E_PRTPE_RUPM_CTL_RUPM_MODE_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_CTL_RUPM_MODE_SHIFT)
+#define I40E_PRTPE_RUPM_PFCCTL 0x0000DA60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCCTL_TC2PFC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCCTL_TC2PFC_SHIFT)
+#define I40E_PRTPE_RUPM_PFCPC 0x0000DA80 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCPC_PORTOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC 0x0000DAA0 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT 0
+#define I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_TCOFFTH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT 16
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_TH_SHIFT)
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT 31
+#define I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_MASK I40E_MASK(0x1, I40E_PRTPE_RUPM_PFCTCC_LL_PRI_EN_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47 0x0000DB60 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTCTCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03 0x0000DB40 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC0COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC1COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC2COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_PTXTCCNTR03_TC3COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47 0x0000DB00 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT 0
+#define I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC4COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT 8
+#define I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC5COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT 16
+#define I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC6COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT 24
+#define I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_TCCNTR47_TC7COUNT_SHIFT)
+#define I40E_PRTPE_RUPM_THRES 0x0000DA20 /* Reset: PE_CORER */
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT 0
+#define I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MINSPADSPERTC_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT 8
+#define I40E_PRTPE_RUPM_THRES_MAXSPADS_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADS_SHIFT)
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT 16
+#define I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_MASK I40E_MASK(0xFF, I40E_PRTPE_RUPM_THRES_MAXSPADSPERTC_SHIFT)
+#define I40E_VFPE_AEQALLOC(_VF) (0x00130C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC_MAX_INDEX 127
+#define I40E_VFPE_AEQALLOC_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH(_VF) (0x00001000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH_MAX_INDEX 127
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW(_VF) (0x00000C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW_MAX_INDEX 127
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS(_VF) (0x00000800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS_MAX_INDEX 127
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK(_VF) (0x00130800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQACK_MAX_INDEX 127
+#define I40E_VFPE_CQACK_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK_PECQID_SHIFT)
+#define I40E_VFPE_CQARM(_VF) (0x00130400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQARM_MAX_INDEX 127
+#define I40E_VFPE_CQARM_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB(_VF) (0x00000000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPDB_MAX_INDEX 127
+#define I40E_VFPE_CQPDB_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES(_VF) (0x00001800 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES_MAX_INDEX 127
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL(_VF) (0x00000400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL_MAX_INDEX 127
+#define I40E_VFPE_CQPTAIL_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG0(_VF) (0x00001400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG0_MAX_INDEX 127
+#define I40E_VFPE_IPCONFIG0_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG0_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG0_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG0_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK(_VF) (0x00003000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK_MAX_INDEX 127
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR(_VF) (0x00003400 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR_MAX_INDEX 127
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER(_VF) (0x00002C00 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER_MAX_INDEX 127
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */ /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC_MAX_INDEX 127
+#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)
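The (_VF)-parameterized macros above map one register instance per virtual function at a 4-byte stride (128 slots, hence the _VF=0...127 range), and multi-field registers such as I40E_VFPE_WQEALLOC are written by shifting each field into position and clipping it to its mask. A hedged sketch reusing the definitions just above; write_reg stands in for the driver's real MMIO accessor and is an assumption of this example, not part of the diff:

#include <stdint.h>
#include <stdio.h>

#define I40E_MASK(mask, shift) ((uint32_t)(mask) << (shift)) /* assumed helper */

#define I40E_VFPE_WQEALLOC(_VF) (0x00138000 + ((_VF) * 4)) /* _VF=0...127 */
#define I40E_VFPE_WQEALLOC_PEQPID_SHIFT 0
#define I40E_VFPE_WQEALLOC_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC_PEQPID_SHIFT)
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT 20
#define I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT)

/* Hypothetical MMIO write, standing in for the driver's accessor. */
static void write_reg(uint32_t off, uint32_t val)
{
    printf("wr 0x%08x <- 0x%08x\n", off, val);
}

static void vfpe_wqe_alloc(int vf, uint32_t qpid, uint32_t desc_idx)
{
    /* Shift each field into position and clip it to its mask. */
    uint32_t db =
        ((qpid << I40E_VFPE_WQEALLOC_PEQPID_SHIFT) &
         I40E_VFPE_WQEALLOC_PEQPID_MASK) |
        ((desc_idx << I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_SHIFT) &
         I40E_VFPE_WQEALLOC_WQE_DESC_INDEX_MASK);
    write_reg(I40E_VFPE_WQEALLOC(vf), db);
}

int main(void)
{
    vfpe_wqe_alloc(3, 0x42, 7);
    return 0;
}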
+#define I40E_GLPES_PFIP4RXDISCARD(_i) (0x00010600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSHI(_i) (0x00010804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXFRAGSLO(_i) (0x00010800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSHI(_i) (0x00010A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCOCTSLO(_i) (0x00010A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSHI(_i) (0x00010C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXMCPKTSLO(_i) (0x00010C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSHI(_i) (0x00010204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXOCTSLO(_i) (0x00010200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSHI(_i) (0x00010404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4RXPKTSLO(_i) (0x00010400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4RXTRUNC(_i) (0x00010700 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSHI(_i) (0x00011E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXFRAGSLO(_i) (0x00011E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSHI(_i) (0x00012004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCOCTSLO(_i) (0x00012000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSHI(_i) (0x00012204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXMCPKTSLO(_i) (0x00012200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXNOROUTE(_i) (0x00012E00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSHI(_i) (0x00011A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXOCTSLO(_i) (0x00011A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSHI(_i) (0x00011C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP4TXPKTSLO(_i) (0x00011C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP4TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXDISCARD(_i) (0x00011200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXDISCARD_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSHI(_i) (0x00011404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXFRAGSLO(_i) (0x00011400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSHI(_i) (0x00011604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCOCTSLO(_i) (0x00011600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSHI(_i) (0x00011804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXMCPKTSLO(_i) (0x00011800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSHI(_i) (0x00010E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXOCTSLO(_i) (0x00010E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSHI(_i) (0x00011004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6RXPKTSLO(_i) (0x00011000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6RXTRUNC(_i) (0x00011300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6RXTRUNC_MAX_INDEX 15
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSHI(_i) (0x00012804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXFRAGSLO(_i) (0x00012800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXFRAGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSHI(_i) (0x00012A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCOCTSLO(_i) (0x00012A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSHI(_i) (0x00012C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXMCPKTSLO(_i) (0x00012C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXMCPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXNOROUTE(_i) (0x00012F00 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXNOROUTE_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSHI(_i) (0x00012404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXOCTSLO(_i) (0x00012400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXOCTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSHI(_i) (0x00012604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_PFIP6TXPKTSLO(_i) (0x00012600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFIP6TXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSHI(_i) (0x00013E04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXRDSLO(_i) (0x00013E00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSHI(_i) (0x00014004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXSNDSLO(_i) (0x00014000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSHI(_i) (0x00013C04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMARXWRSLO(_i) (0x00013C00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMARXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSHI(_i) (0x00014404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXRDSLO(_i) (0x00014400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXRDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSHI(_i) (0x00014604 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXSNDSLO(_i) (0x00014600 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXSNDSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSHI(_i) (0x00014204 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_PFRDMATXWRSLO(_i) (0x00014200 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMATXWRSLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDHI(_i) (0x00014804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_PFRDMAVBNDLO(_i) (0x00014800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVBNDLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_PFRDMAVINVHI(_i) (0x00014A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVHI_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_PFRDMAVINVLO(_i) (0x00014A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRDMAVINVLO_MAX_INDEX 15
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_PFRXVLANERR(_i) (0x00010000 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFRXVLANERR_MAX_INDEX 15
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_PFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_PFTCPRTXSEG(_i) (0x00013600 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRTXSEG_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_PFTCPRXOPTERR(_i) (0x00013200 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXOPTERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_PFTCPRXPROTOERR(_i) (0x00013300 + ((_i) * 4)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXPROTOERR_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_PFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSHI(_i) (0x00013004 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_PFTCPRXSEGSLO(_i) (0x00013000 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPRXSEGSLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGHI(_i) (0x00013404 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGHI_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_PFTCPTXSEGLO(_i) (0x00013400 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFTCPTXSEGLO_MAX_INDEX 15
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPRXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSHI(_i) (0x00013A04 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSHI_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_PFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_PFUDPTXPKTSLO(_i) (0x00013A00 + ((_i) * 8)) /* _i=0...15 */ /* Reset: PE_CORER */
+#define I40E_GLPES_PFUDPTXPKTSLO_MAX_INDEX 15
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_PFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSHI 0x0001E014 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXMULTFPDUSHI_RDMARXMULTFPDUSHI_SHIFT)
+#define I40E_GLPES_RDMARXMULTFPDUSLO 0x0001E010 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT 0
+#define I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXMULTFPDUSLO_RDMARXMULTFPDUSLO_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPHI 0x0001E01C /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_RDMARXOOODDPHI_RDMARXOOODDPHI_SHIFT)
+#define I40E_GLPES_RDMARXOOODDPLO 0x0001E018 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT 0
+#define I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOODDPLO_RDMARXOOODDPLO_SHIFT)
+#define I40E_GLPES_RDMARXOOONOMARK 0x0001E004 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT 0
+#define I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXOOONOMARK_RDMAOOONOMARK_SHIFT)
+#define I40E_GLPES_RDMARXUNALIGN 0x0001E000 /* Reset: PE_CORER */
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT 0
+#define I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_RDMARXUNALIGN_RDMRXAUNALIGN_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLEHI 0x0001E044 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXFOURHOLEHI_TCPRXFOURHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXFOURHOLELO 0x0001E040 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXFOURHOLELO_TCPRXFOURHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLEHI 0x0001E02C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXONEHOLEHI_TCPRXONEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXONEHOLELO 0x0001E028 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXONEHOLELO_TCPRXONEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKHI 0x0001E024 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXPUREACKHI_TCPRXPUREACKSHI_SHIFT)
+#define I40E_GLPES_TCPRXPUREACKSLO 0x0001E020 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT 0
+#define I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXPUREACKSLO_TCPRXPUREACKLO_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLEHI 0x0001E03C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTHREEHOLEHI_TCPRXTHREEHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTHREEHOLELO 0x0001E038 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTHREEHOLELO_TCPRXTHREEHOLELO_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLEHI 0x0001E034 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPRXTWOHOLEHI_TCPRXTWOHOLEHI_SHIFT)
+#define I40E_GLPES_TCPRXTWOHOLELO 0x0001E030 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT 0
+#define I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPRXTWOHOLELO_TCPRXTWOHOLELO_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTHI 0x0001E04C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXRETRANSFASTHI_TCPTXRETRANSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXRETRANSFASTLO 0x0001E048 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXRETRANSFASTLO_TCPTXRETRANSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTHI 0x0001E054 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSFASTHI_TCPTXTOUTSFASTHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSFASTLO 0x0001E050 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSFASTLO_TCPTXTOUTSFASTLO_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSHI 0x0001E05C /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_TCPTXTOUTSHI_TCPTXTOUTSHI_SHIFT)
+#define I40E_GLPES_TCPTXTOUTSLO 0x0001E058 /* Reset: PE_CORER */
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT 0
+#define I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_TCPTXTOUTSLO_TCPTXTOUTSLO_SHIFT)
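The GLPES statistics registers above come in LO/HI pairs: LO is a full 32-bit word and HI contributes only the bits covered by its mask (0xFFFF for the packet counters), so each pair forms one 48-bit free-running counter. A sketch of composing a single value follows, assuming the usual LO-then-HI sampling order; read_reg is again a hypothetical accessor supplied only for the example:

#include <stdint.h>
#include <stdio.h>

/* Per-PF UDP RX packet counter pair, offsets copied from the diff above. */
#define I40E_GLPES_PFUDPRXPKTSLO(_i) (0x00013800 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPRXPKTSHI(_i) (0x00013804 + ((_i) * 8)) /* _i=0...15 */
#define I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK 0xFFFF /* upper 16 bits */

/* Hypothetical MMIO read for the sketch: HI reads 1, LO reads 2. */
static uint32_t read_reg(uint32_t off)
{
    return (off & 0x4) ? 0x0001 : 0x0002;
}

/* Compose the 48-bit counter: 32 bits from LO, 16 valid bits from HI. */
static uint64_t pf_udp_rx_pkts(int pf)
{
    uint64_t lo = read_reg(I40E_GLPES_PFUDPRXPKTSLO(pf));
    uint64_t hi = read_reg(I40E_GLPES_PFUDPRXPKTSHI(pf)) &
        I40E_GLPES_PFUDPRXPKTSHI_UDPRXPKTSHI_MASK;
    return (hi << 32) | lo;
}

int main(void)
{
    printf("pf0 udp rx pkts: 0x%llx\n",
        (unsigned long long)pf_udp_rx_pkts(0));
    return 0;
}

A production driver would typically keep deltas against a previously sampled baseline rather than trusting a single raw read, so that counter rollover between samples is handled.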
+#define I40E_GLPES_VFIP4RXDISCARD(_i) (0x00018600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXDISCARD_IP4RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSHI(_i) (0x00018804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXFRAGSHI_IP4RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXFRAGSLO(_i) (0x00018800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXFRAGSLO_IP4RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSHI(_i) (0x00018A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCOCTSHI_IP4RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCOCTSLO(_i) (0x00018A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCOCTSLO_IP4RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSHI(_i) (0x00018C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXMCPKTSHI_IP4RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXMCPKTSLO(_i) (0x00018C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXMCPKTSLO_IP4RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSHI(_i) (0x00018204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXOCTSHI_IP4RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXOCTSLO(_i) (0x00018200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXOCTSLO_IP4RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSHI(_i) (0x00018404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4RXPKTSHI_IP4RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4RXPKTSLO(_i) (0x00018400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXPKTSLO_IP4RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4RXTRUNC(_i) (0x00018700 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4RXTRUNC_IP4RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSHI(_i) (0x00019E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXFRAGSHI_IP4TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXFRAGSLO(_i) (0x00019E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXFRAGSLO_IP4TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSHI(_i) (0x0001A004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCOCTSHI_IP4TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCOCTSLO(_i) (0x0001A000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCOCTSLO_IP4TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSHI(_i) (0x0001A204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXMCPKTSHI_IP4TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXMCPKTSLO(_i) (0x0001A200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXMCPKTSLO_IP4TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXNOROUTE(_i) (0x0001AE00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP4TXNOROUTE_IP4TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSHI(_i) (0x00019A04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXOCTSHI_IP4TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXOCTSLO(_i) (0x00019A00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXOCTSLO_IP4TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSHI(_i) (0x00019C04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP4TXPKTSHI_IP4TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP4TXPKTSLO(_i) (0x00019C00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP4TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP4TXPKTSLO_IP4TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXDISCARD(_i) (0x00019200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXDISCARD_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT 0
+#define I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXDISCARD_IP6RXDISCARD_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSHI(_i) (0x00019404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXFRAGSHI_IP6RXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXFRAGSLO(_i) (0x00019400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXFRAGSLO_IP6RXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSHI(_i) (0x00019604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCOCTSHI_IP6RXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCOCTSLO(_i) (0x00019600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCOCTSLO_IP6RXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSHI(_i) (0x00019804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXMCPKTSHI_IP6RXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXMCPKTSLO(_i) (0x00019800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXMCPKTSLO_IP6RXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSHI(_i) (0x00018E04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXOCTSHI_IP6RXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXOCTSLO(_i) (0x00018E00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXOCTSLO_IP6RXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSHI(_i) (0x00019004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6RXPKTSHI_IP6RXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6RXPKTSLO(_i) (0x00019000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXPKTSLO_IP6RXPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6RXTRUNC(_i) (0x00019300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6RXTRUNC_MAX_INDEX 31
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT 0
+#define I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6RXTRUNC_IP6RXTRUNC_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSHI(_i) (0x0001A804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXFRAGSHI_IP6TXFRAGSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXFRAGSLO(_i) (0x0001A800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXFRAGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXFRAGSLO_IP6TXFRAGSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSHI(_i) (0x0001AA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCOCTSHI_IP6TXMCOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCOCTSLO(_i) (0x0001AA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCOCTSLO_IP6TXMCOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSHI(_i) (0x0001AC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXMCPKTSHI_IP6TXMCPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXMCPKTSLO(_i) (0x0001AC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXMCPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXMCPKTSLO_IP6TXMCPKTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXNOROUTE(_i) (0x0001AF00 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXNOROUTE_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT 0
+#define I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFIP6TXNOROUTE_IP6TXNOROUTE_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSHI(_i) (0x0001A404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXOCTSHI_IP6TXOCTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXOCTSLO(_i) (0x0001A400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXOCTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXOCTSLO_IP6TXOCTSLO_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSHI(_i) (0x0001A604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFIP6TXPKTSHI_IP6TXPKTSHI_SHIFT)
+#define I40E_GLPES_VFIP6TXPKTSLO(_i) (0x0001A600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFIP6TXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFIP6TXPKTSLO_IP6TXPKTSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSHI(_i) (0x0001BE04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXRDSLO(_i) (0x0001BE00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSHI(_i) (0x0001C004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXSNDSLO(_i) (0x0001C000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSHI(_i) (0x0001BC04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMARXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMARXWRSLO(_i) (0x0001BC00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMARXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMARXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSHI(_i) (0x0001C404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXRDSHI_RDMARXRDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXRDSLO(_i) (0x0001C400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXRDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXRDSLO_RDMARXRDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSHI(_i) (0x0001C604 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXSNDSHI_RDMARXSNDSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXSNDSLO(_i) (0x0001C600 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXSNDSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXSNDSLO_RDMARXSNDSLO_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSHI(_i) (0x0001C204 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFRDMATXWRSHI_RDMARXWRSHI_SHIFT)
+#define I40E_GLPES_VFRDMATXWRSLO(_i) (0x0001C200 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMATXWRSLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT 0
+#define I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMATXWRSLO_RDMARXWRSLO_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDHI(_i) (0x0001C804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDHI_RDMAVBNDHI_SHIFT)
+#define I40E_GLPES_VFRDMAVBNDLO(_i) (0x0001C800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVBNDLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVBNDLO_RDMAVBNDLO_SHIFT)
+#define I40E_GLPES_VFRDMAVINVHI(_i) (0x0001CA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVHI_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVHI_RDMAVINVHI_SHIFT)
+#define I40E_GLPES_VFRDMAVINVLO(_i) (0x0001CA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRDMAVINVLO_MAX_INDEX 31
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT 0
+#define I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFRDMAVINVLO_RDMAVINVLO_SHIFT)
+#define I40E_GLPES_VFRXVLANERR(_i) (0x00018000 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFRXVLANERR_MAX_INDEX 31
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT 0
+#define I40E_GLPES_VFRXVLANERR_RXVLANERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFRXVLANERR_RXVLANERR_SHIFT)
+#define I40E_GLPES_VFTCPRTXSEG(_i) (0x0001B600 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRTXSEG_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT 0
+#define I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRTXSEG_TCPRTXSEG_SHIFT)
+#define I40E_GLPES_VFTCPRXOPTERR(_i) (0x0001B200 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXOPTERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXOPTERR_TCPRXOPTERR_SHIFT)
+#define I40E_GLPES_VFTCPRXPROTOERR(_i) (0x0001B300 + ((_i) * 4)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXPROTOERR_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT 0
+#define I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_MASK I40E_MASK(0xFFFFFF, I40E_GLPES_VFTCPRXPROTOERR_TCPRXPROTOERR_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSHI(_i) (0x0001B004 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPRXSEGSHI_TCPRXSEGSHI_SHIFT)
+#define I40E_GLPES_VFTCPRXSEGSLO(_i) (0x0001B000 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPRXSEGSLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT 0
+#define I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPRXSEGSLO_TCPRXSEGSLO_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGHI(_i) (0x0001B404 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGHI_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFTCPTXSEGHI_TCPTXSEGHI_SHIFT)
+#define I40E_GLPES_VFTCPTXSEGLO(_i) (0x0001B400 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFTCPTXSEGLO_MAX_INDEX 31
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT 0
+#define I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFTCPTXSEGLO_TCPTXSEGLO_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSHI(_i) (0x0001B804 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPRXPKTSHI_UDPRXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPRXPKTSLO(_i) (0x0001B800 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPRXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPRXPKTSLO_UDPRXPKTSLO_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSHI(_i) (0x0001BA04 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSHI_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_MASK I40E_MASK(0xFFFF, I40E_GLPES_VFUDPTXPKTSHI_UDPTXPKTSHI_SHIFT)
+#define I40E_GLPES_VFUDPTXPKTSLO(_i) (0x0001BA00 + ((_i) * 8)) /* _i=0...31 */ /* Reset: PE_CORER */
+#define I40E_GLPES_VFUDPTXPKTSLO_MAX_INDEX 31
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT 0
+#define I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_MASK I40E_MASK(0xFFFFFFFF, I40E_GLPES_VFUDPTXPKTSLO_UDPTXPKTSLO_SHIFT)
+#define I40E_GLGEN_PME_TO 0x000B81BC /* Reset: POR */
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT 0
+#define I40E_GLGEN_PME_TO_PME_TO_FOR_PE_MASK I40E_MASK(0x1, I40E_GLGEN_PME_TO_PME_TO_FOR_PE_SHIFT)
+#define I40E_GLQF_APBVT(_i) (0x00260000 + ((_i) * 4)) /* _i=0...2047 */ /* Reset: CORER */
+#define I40E_GLQF_APBVT_MAX_INDEX 2047
+#define I40E_GLQF_APBVT_APBVT_SHIFT 0
+#define I40E_GLQF_APBVT_APBVT_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_APBVT_APBVT_SHIFT)
+#define I40E_GLQF_FD_PCTYPES(_i) (0x00268000 + ((_i) * 4)) /* _i=0...63 */ /* Reset: POR */
+#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
+#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
+#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_FD_MSK_MAX_INDEX 1
+#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
+#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
+#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
+#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
+#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
+#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
+#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
+#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
+#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
+#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
+#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
+#define I40E_GLQF_ORT_MAX_INDEX 63
+#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
+#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
+#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
+#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
+#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
+#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
+#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
+#define I40E_GLQF_PIT_MAX_INDEX 23
+#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
+#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
+#define I40E_GLQF_PIT_FSIZE_SHIFT 5
+#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
+#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
+#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
+#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
+#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
+#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG 0x00270280 /* Reset: CORER */
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT 0
+#define I40E_GLQF_FDEVICTFLAG_TX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_TX_FLAGS_SHIFT)
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT 8
+#define I40E_GLQF_FDEVICTFLAG_RX_FLAGS_MASK I40E_MASK(0xFF, I40E_GLQF_FDEVICTFLAG_RX_FLAGS_SHIFT)
+#define I40E_PFQF_CTL_2 0x00270300 /* Reset: CORER */
+#define I40E_PFQF_CTL_2_PEHSIZE_SHIFT 0
+#define I40E_PFQF_CTL_2_PEHSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEHSIZE_SHIFT)
+#define I40E_PFQF_CTL_2_PEDSIZE_SHIFT 5
+#define I40E_PFQF_CTL_2_PEDSIZE_MASK I40E_MASK(0x1F, I40E_PFQF_CTL_2_PEDSIZE_SHIFT)
+/* Redefined for X722 family */
+#define I40E_X722_PFQF_HLUT(_i) (0x00240000 + ((_i) * 128)) /* _i=0...127 */ /* Reset: CORER */
+#define I40E_X722_PFQF_HLUT_MAX_INDEX 127
+#define I40E_X722_PFQF_HLUT_LUT0_SHIFT 0
+#define I40E_X722_PFQF_HLUT_LUT0_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT0_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT1_SHIFT 8
+#define I40E_X722_PFQF_HLUT_LUT1_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT1_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT2_SHIFT 16
+#define I40E_X722_PFQF_HLUT_LUT2_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT2_SHIFT)
+#define I40E_X722_PFQF_HLUT_LUT3_SHIFT 24
+#define I40E_X722_PFQF_HLUT_LUT3_MASK I40E_MASK(0x7F, I40E_X722_PFQF_HLUT_LUT3_SHIFT)
+#define I40E_PFQF_HREGION(_i) (0x00245400 + ((_i) * 128)) /* _i=0...7 */ /* Reset: CORER */
+#define I40E_PFQF_HREGION_MAX_INDEX 7
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT 0
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_0_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_0_SHIFT)
+#define I40E_PFQF_HREGION_REGION_0_SHIFT 1
+#define I40E_PFQF_HREGION_REGION_0_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_0_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT 4
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_1_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_1_SHIFT)
+#define I40E_PFQF_HREGION_REGION_1_SHIFT 5
+#define I40E_PFQF_HREGION_REGION_1_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_1_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT 8
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_2_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_2_SHIFT)
+#define I40E_PFQF_HREGION_REGION_2_SHIFT 9
+#define I40E_PFQF_HREGION_REGION_2_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_2_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT 12
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_3_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_3_SHIFT)
+#define I40E_PFQF_HREGION_REGION_3_SHIFT 13
+#define I40E_PFQF_HREGION_REGION_3_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_3_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT 16
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_4_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_4_SHIFT)
+#define I40E_PFQF_HREGION_REGION_4_SHIFT 17
+#define I40E_PFQF_HREGION_REGION_4_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_4_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT 20
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_5_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_5_SHIFT)
+#define I40E_PFQF_HREGION_REGION_5_SHIFT 21
+#define I40E_PFQF_HREGION_REGION_5_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_5_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT 24
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_6_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_6_SHIFT)
+#define I40E_PFQF_HREGION_REGION_6_SHIFT 25
+#define I40E_PFQF_HREGION_REGION_6_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_6_SHIFT)
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT 28
+#define I40E_PFQF_HREGION_OVERRIDE_ENA_7_MASK I40E_MASK(0x1, I40E_PFQF_HREGION_OVERRIDE_ENA_7_SHIFT)
+#define I40E_PFQF_HREGION_REGION_7_SHIFT 29
+#define I40E_PFQF_HREGION_REGION_7_MASK I40E_MASK(0x7, I40E_PFQF_HREGION_REGION_7_SHIFT)
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT 8
+#define I40E_VSIQF_CTL_RSS_LUT_TYPE_MASK I40E_MASK(0x1, I40E_VSIQF_CTL_RSS_LUT_TYPE_SHIFT)
+#define I40E_VSIQF_HKEY(_i, _VSI) (0x002A0000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...12, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HKEY_MAX_INDEX 12
+#define I40E_VSIQF_HKEY_KEY_0_SHIFT 0
+#define I40E_VSIQF_HKEY_KEY_0_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_0_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_1_SHIFT 8
+#define I40E_VSIQF_HKEY_KEY_1_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_1_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_2_SHIFT 16
+#define I40E_VSIQF_HKEY_KEY_2_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_2_SHIFT)
+#define I40E_VSIQF_HKEY_KEY_3_SHIFT 24
+#define I40E_VSIQF_HKEY_KEY_3_MASK I40E_MASK(0xFF, I40E_VSIQF_HKEY_KEY_3_SHIFT)
+#define I40E_VSIQF_HLUT(_i, _VSI) (0x00220000 + ((_i) * 2048 + (_VSI) * 4)) /* _i=0...15, _VSI=0...383 */ /* Reset: CORER */
+#define I40E_VSIQF_HLUT_MAX_INDEX 15
+#define I40E_VSIQF_HLUT_LUT0_SHIFT 0
+#define I40E_VSIQF_HLUT_LUT0_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT0_SHIFT)
+#define I40E_VSIQF_HLUT_LUT1_SHIFT 8
+#define I40E_VSIQF_HLUT_LUT1_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT1_SHIFT)
+#define I40E_VSIQF_HLUT_LUT2_SHIFT 16
+#define I40E_VSIQF_HLUT_LUT2_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT2_SHIFT)
+#define I40E_VSIQF_HLUT_LUT3_SHIFT 24
+#define I40E_VSIQF_HLUT_LUT3_MASK I40E_MASK(0xF, I40E_VSIQF_HLUT_LUT3_SHIFT)
+#define I40E_GLGEN_STAT_CLEAR 0x00390004 /* Reset: CORER */
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT 0
+#define I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_MASK I40E_MASK(0x1, I40E_GLGEN_STAT_CLEAR_GLGEN_STAT_CLEAR_SHIFT)
+#define I40E_GLGEN_STAT_HALT 0x00390000 /* Reset: CORER */
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT 0
+#define I40E_GLGEN_STAT_HALT_HALT_CELLS_MASK I40E_MASK(0x3FFFFFFF, I40E_GLGEN_STAT_HALT_HALT_CELLS_SHIFT)
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTL01_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTL01_WB_ON_ITR_SHIFT)
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT 30
+#define I40E_VFINT_DYN_CTLN1_WB_ON_ITR_MASK I40E_MASK(0x1, I40E_VFINT_DYN_CTLN1_WB_ON_ITR_SHIFT)
+#define I40E_VFPE_AEQALLOC1 0x0000A400 /* Reset: VFR */
+#define I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT 0
+#define I40E_VFPE_AEQALLOC1_AECOUNT_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_AEQALLOC1_AECOUNT_SHIFT)
+#define I40E_VFPE_CCQPHIGH1 0x00009800 /* Reset: VFR */
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT 0
+#define I40E_VFPE_CCQPHIGH1_PECCQPHIGH_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPHIGH1_PECCQPHIGH_SHIFT)
+#define I40E_VFPE_CCQPLOW1 0x0000AC00 /* Reset: VFR */
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT 0
+#define I40E_VFPE_CCQPLOW1_PECCQPLOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_CCQPLOW1_PECCQPLOW_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1 0x0000B800 /* Reset: VFR */
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT 0
+#define I40E_VFPE_CCQPSTATUS1_CCQP_DONE_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_DONE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT 4
+#define I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_MASK I40E_MASK(0x7, I40E_VFPE_CCQPSTATUS1_HMC_PROFILE_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT 16
+#define I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_MASK I40E_MASK(0x3F, I40E_VFPE_CCQPSTATUS1_RDMA_EN_VFS_SHIFT)
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT 31
+#define I40E_VFPE_CCQPSTATUS1_CCQP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CCQPSTATUS1_CCQP_ERR_SHIFT)
+#define I40E_VFPE_CQACK1 0x0000B000 /* Reset: VFR */
+#define I40E_VFPE_CQACK1_PECQID_SHIFT 0
+#define I40E_VFPE_CQACK1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQACK1_PECQID_SHIFT)
+#define I40E_VFPE_CQARM1 0x0000B400 /* Reset: VFR */
+#define I40E_VFPE_CQARM1_PECQID_SHIFT 0
+#define I40E_VFPE_CQARM1_PECQID_MASK I40E_MASK(0x1FFFF, I40E_VFPE_CQARM1_PECQID_SHIFT)
+#define I40E_VFPE_CQPDB1 0x0000BC00 /* Reset: VFR */
+#define I40E_VFPE_CQPDB1_WQHEAD_SHIFT 0
+#define I40E_VFPE_CQPDB1_WQHEAD_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPDB1_WQHEAD_SHIFT)
+#define I40E_VFPE_CQPERRCODES1 0x00009C00 /* Reset: VFR */
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT 0
+#define I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MINOR_CODE_SHIFT)
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT 16
+#define I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_MASK I40E_MASK(0xFFFF, I40E_VFPE_CQPERRCODES1_CQP_MAJOR_CODE_SHIFT)
+#define I40E_VFPE_CQPTAIL1 0x0000A000 /* Reset: VFR */
+#define I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT 0
+#define I40E_VFPE_CQPTAIL1_WQTAIL_MASK I40E_MASK(0x7FF, I40E_VFPE_CQPTAIL1_WQTAIL_SHIFT)
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT 31
+#define I40E_VFPE_CQPTAIL1_CQP_OP_ERR_MASK I40E_MASK(0x1, I40E_VFPE_CQPTAIL1_CQP_OP_ERR_SHIFT)
+#define I40E_VFPE_IPCONFIG01 0x00008C00 /* Reset: VFR */
+#define I40E_VFPE_IPCONFIG01_PEIPID_SHIFT 0
+#define I40E_VFPE_IPCONFIG01_PEIPID_MASK I40E_MASK(0xFFFF, I40E_VFPE_IPCONFIG01_PEIPID_SHIFT)
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT 16
+#define I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_MASK I40E_MASK(0x1, I40E_VFPE_IPCONFIG01_USEENTIREIDRANGE_SHIFT)
+#define I40E_VFPE_MRTEIDXMASK1 0x00009000 /* Reset: VFR */
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT 0
+#define I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_MASK I40E_MASK(0x1F, I40E_VFPE_MRTEIDXMASK1_MRTEIDXMASKBITS_SHIFT)
+#define I40E_VFPE_RCVUNEXPECTEDERROR1 0x00009400 /* Reset: VFR */
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT 0
+#define I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_MASK I40E_MASK(0xFFFFFF, I40E_VFPE_RCVUNEXPECTEDERROR1_TCP_RX_UNEXP_ERR_SHIFT)
+#define I40E_VFPE_TCPNOWTIMER1 0x0000A800 /* Reset: VFR */
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT 0
+#define I40E_VFPE_TCPNOWTIMER1_TCP_NOW_MASK I40E_MASK(0xFFFFFFFF, I40E_VFPE_TCPNOWTIMER1_TCP_NOW_SHIFT)
+#define I40E_VFPE_WQEALLOC1 0x0000C000 /* Reset: VFR */
+#define I40E_VFPE_WQEALLOC1_PEQPID_SHIFT 0
+#define I40E_VFPE_WQEALLOC1_PEQPID_MASK I40E_MASK(0x3FFFF, I40E_VFPE_WQEALLOC1_PEQPID_SHIFT)
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT 20
+#define I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_MASK I40E_MASK(0xFFF, I40E_VFPE_WQEALLOC1_WQE_DESC_INDEX_SHIFT)
+
#endif /* _I40E_REGISTER_H_ */
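Every register in the generated header above follows the same three-part convention: an address macro (optionally indexed by _i/_j), a per-field _SHIFT, and a per-field _MASK built with I40E_MASK(). A minimal sketch of the read-modify-write idiom these macros exist for, assuming the rd32()/wr32() register accessors the ixl driver provides:

    /* Sketch: update the PEHSIZE field of PFQF_CTL_2 while leaving
     * the register's other fields untouched. */
    static void
    ixl_set_pehsize(struct i40e_hw *hw, u32 pehsize)
    {
            u32 reg = rd32(hw, I40E_PFQF_CTL_2);

            reg &= ~I40E_PFQF_CTL_2_PEHSIZE_MASK;
            reg |= (pehsize << I40E_PFQF_CTL_2_PEHSIZE_SHIFT) &
                I40E_PFQF_CTL_2_PEHSIZE_MASK;
            wr32(hw, I40E_PFQF_CTL_2, reg);
    }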
diff --git a/sys/dev/ixl/i40e_type.h b/sys/dev/ixl/i40e_type.h
index 57351cb0bb04..fa8c7192e99f 100644
--- a/sys/dev/ixl/i40e_type.h
+++ b/sys/dev/ixl/i40e_type.h
@@ -43,7 +43,6 @@
#include "i40e_lan_hmc.h"
#include "i40e_devids.h"
-#define UNREFERENCED_XPARAMETER
#define BIT(a) (1UL << (a))
#define BIT_ULL(a) (1ULL << (a))
@@ -147,8 +146,10 @@ enum i40e_debug_mask {
#define I40E_PCI_LINK_SPEED_5000 0x2
#define I40E_PCI_LINK_SPEED_8000 0x3
-#define I40E_MDIO_STCODE 0
-#define I40E_MDIO_OPCODE_ADDRESS 0
+#define I40E_MDIO_STCODE I40E_MASK(0, \
+ I40E_GLGEN_MSCA_STCODE_SHIFT)
+#define I40E_MDIO_OPCODE_ADDRESS I40E_MASK(0, \
+ I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
@@ -177,6 +178,7 @@ enum i40e_memcpy_type {
I40E_DMA_TO_NONDMA
};
+#define I40E_FW_API_VERSION_MINOR_X722 0x0005
#define I40E_FW_API_VERSION_MINOR_X710 0x0005
@@ -193,6 +195,8 @@ enum i40e_mac_type {
I40E_MAC_X710,
I40E_MAC_XL710,
I40E_MAC_VF,
+ I40E_MAC_X722,
+ I40E_MAC_X722_VF,
I40E_MAC_GENERIC,
};
@@ -275,50 +279,61 @@ struct i40e_link_status {
#define I40E_MODULE_TYPE_1000BASE_T 0x08
};
-enum i40e_aq_capabilities_phy_type {
- I40E_CAP_PHY_TYPE_SGMII = BIT(I40E_PHY_TYPE_SGMII),
- I40E_CAP_PHY_TYPE_1000BASE_KX = BIT(I40E_PHY_TYPE_1000BASE_KX),
- I40E_CAP_PHY_TYPE_10GBASE_KX4 = BIT(I40E_PHY_TYPE_10GBASE_KX4),
- I40E_CAP_PHY_TYPE_10GBASE_KR = BIT(I40E_PHY_TYPE_10GBASE_KR),
- I40E_CAP_PHY_TYPE_40GBASE_KR4 = BIT(I40E_PHY_TYPE_40GBASE_KR4),
- I40E_CAP_PHY_TYPE_XAUI = BIT(I40E_PHY_TYPE_XAUI),
- I40E_CAP_PHY_TYPE_XFI = BIT(I40E_PHY_TYPE_XFI),
- I40E_CAP_PHY_TYPE_SFI = BIT(I40E_PHY_TYPE_SFI),
- I40E_CAP_PHY_TYPE_XLAUI = BIT(I40E_PHY_TYPE_XLAUI),
- I40E_CAP_PHY_TYPE_XLPPI = BIT(I40E_PHY_TYPE_XLPPI),
- I40E_CAP_PHY_TYPE_40GBASE_CR4_CU = BIT(I40E_PHY_TYPE_40GBASE_CR4_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1_CU = BIT(I40E_PHY_TYPE_10GBASE_CR1_CU),
- I40E_CAP_PHY_TYPE_10GBASE_AOC = BIT(I40E_PHY_TYPE_10GBASE_AOC),
- I40E_CAP_PHY_TYPE_40GBASE_AOC = BIT(I40E_PHY_TYPE_40GBASE_AOC),
- I40E_CAP_PHY_TYPE_100BASE_TX = BIT(I40E_PHY_TYPE_100BASE_TX),
- I40E_CAP_PHY_TYPE_1000BASE_T = BIT(I40E_PHY_TYPE_1000BASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_T = BIT(I40E_PHY_TYPE_10GBASE_T),
- I40E_CAP_PHY_TYPE_10GBASE_SR = BIT(I40E_PHY_TYPE_10GBASE_SR),
- I40E_CAP_PHY_TYPE_10GBASE_LR = BIT(I40E_PHY_TYPE_10GBASE_LR),
- I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU = BIT(I40E_PHY_TYPE_10GBASE_SFPP_CU),
- I40E_CAP_PHY_TYPE_10GBASE_CR1 = BIT(I40E_PHY_TYPE_10GBASE_CR1),
- I40E_CAP_PHY_TYPE_40GBASE_CR4 = BIT(I40E_PHY_TYPE_40GBASE_CR4),
- I40E_CAP_PHY_TYPE_40GBASE_SR4 = BIT(I40E_PHY_TYPE_40GBASE_SR4),
- I40E_CAP_PHY_TYPE_40GBASE_LR4 = BIT(I40E_PHY_TYPE_40GBASE_LR4),
- I40E_CAP_PHY_TYPE_1000BASE_SX = BIT(I40E_PHY_TYPE_1000BASE_SX),
- I40E_CAP_PHY_TYPE_1000BASE_LX = BIT(I40E_PHY_TYPE_1000BASE_LX),
- I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL = BIT(I40E_PHY_TYPE_1000BASE_T_OPTICAL),
- I40E_CAP_PHY_TYPE_20GBASE_KR2 = BIT(I40E_PHY_TYPE_20GBASE_KR2)
-};
-
struct i40e_phy_info {
struct i40e_link_status link_info;
struct i40e_link_status link_info_old;
bool get_link_info;
enum i40e_media_type media_type;
/* all the phy types the NVM is capable of */
- u32 phy_types;
-};
-
+ u64 phy_types;
+};
+
+#define I40E_CAP_PHY_TYPE_SGMII BIT_ULL(I40E_PHY_TYPE_SGMII)
+#define I40E_CAP_PHY_TYPE_1000BASE_KX BIT_ULL(I40E_PHY_TYPE_1000BASE_KX)
+#define I40E_CAP_PHY_TYPE_10GBASE_KX4 BIT_ULL(I40E_PHY_TYPE_10GBASE_KX4)
+#define I40E_CAP_PHY_TYPE_10GBASE_KR BIT_ULL(I40E_PHY_TYPE_10GBASE_KR)
+#define I40E_CAP_PHY_TYPE_40GBASE_KR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_KR4)
+#define I40E_CAP_PHY_TYPE_XAUI BIT_ULL(I40E_PHY_TYPE_XAUI)
+#define I40E_CAP_PHY_TYPE_XFI BIT_ULL(I40E_PHY_TYPE_XFI)
+#define I40E_CAP_PHY_TYPE_SFI BIT_ULL(I40E_PHY_TYPE_SFI)
+#define I40E_CAP_PHY_TYPE_XLAUI BIT_ULL(I40E_PHY_TYPE_XLAUI)
+#define I40E_CAP_PHY_TYPE_XLPPI BIT_ULL(I40E_PHY_TYPE_XLPPI)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4_CU BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_AOC BIT_ULL(I40E_PHY_TYPE_10GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_40GBASE_AOC BIT_ULL(I40E_PHY_TYPE_40GBASE_AOC)
+#define I40E_CAP_PHY_TYPE_100BASE_TX BIT_ULL(I40E_PHY_TYPE_100BASE_TX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T BIT_ULL(I40E_PHY_TYPE_1000BASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_T BIT_ULL(I40E_PHY_TYPE_10GBASE_T)
+#define I40E_CAP_PHY_TYPE_10GBASE_SR BIT_ULL(I40E_PHY_TYPE_10GBASE_SR)
+#define I40E_CAP_PHY_TYPE_10GBASE_LR BIT_ULL(I40E_PHY_TYPE_10GBASE_LR)
+#define I40E_CAP_PHY_TYPE_10GBASE_SFPP_CU BIT_ULL(I40E_PHY_TYPE_10GBASE_SFPP_CU)
+#define I40E_CAP_PHY_TYPE_10GBASE_CR1 BIT_ULL(I40E_PHY_TYPE_10GBASE_CR1)
+#define I40E_CAP_PHY_TYPE_40GBASE_CR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_CR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_SR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_SR4)
+#define I40E_CAP_PHY_TYPE_40GBASE_LR4 BIT_ULL(I40E_PHY_TYPE_40GBASE_LR4)
+#define I40E_CAP_PHY_TYPE_1000BASE_SX BIT_ULL(I40E_PHY_TYPE_1000BASE_SX)
+#define I40E_CAP_PHY_TYPE_1000BASE_LX BIT_ULL(I40E_PHY_TYPE_1000BASE_LX)
+#define I40E_CAP_PHY_TYPE_1000BASE_T_OPTICAL \
+ BIT_ULL(I40E_PHY_TYPE_1000BASE_T_OPTICAL)
+#define I40E_CAP_PHY_TYPE_20GBASE_KR2 BIT_ULL(I40E_PHY_TYPE_20GBASE_KR2)
+#define I40E_CAP_PHY_TYPE_25GBASE_KR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_KR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_CR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_CR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_SR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_SR + 32)
+#define I40E_CAP_PHY_TYPE_25GBASE_LR BIT_ULL(I40E_AQ_PHY_TYPE_EXT_25G_LR + 32)
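Widening phy_types from u32 to u64 is what makes room for the four 25G entries above: the admin queue reports them in an extended PHY-type byte, so their capability bits land 32 positions up, in the upper half of the word. A hedged sketch of testing one of these bits (hw->phy.phy_types is the u64 field declared above):

    /* Sketch: true if the NVM reports 25GBase-KR support; the 25G
     * bits occupy the upper 32 bits of phy_types. */
    static inline bool
    ixl_phy_supports_25g_kr(struct i40e_hw *hw)
    {
            return ((hw->phy.phy_types & I40E_CAP_PHY_TYPE_25GBASE_KR) != 0);
    }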
#define I40E_HW_CAP_MAX_GPIO 30
#define I40E_HW_CAP_MDIO_PORT_MODE_MDIO 0
#define I40E_HW_CAP_MDIO_PORT_MODE_I2C 1
+enum i40e_acpi_programming_method {
+ I40E_ACPI_PROGRAMMING_METHOD_HW_FVL = 0,
+ I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK = 1
+};
+
+#define I40E_WOL_SUPPORT_MASK 1
+#define I40E_ACPI_PROGRAMMING_METHOD_MASK (1 << 1)
+#define I40E_PROXY_SUPPORT_MASK (1 << 2)
+
/* Capabilities of a PF or a VF or the whole device */
struct i40e_hw_capabilities {
u32 switch_mode;
@@ -348,6 +363,11 @@ struct i40e_hw_capabilities {
#define I40E_FLEX10_STATUS_DCC_ERROR 0x1
#define I40E_FLEX10_STATUS_VC_MODE 0x2
+ bool sec_rev_disabled;
+ bool update_disabled;
+#define I40E_NVM_MGMT_SEC_REV_DISABLED 0x1
+#define I40E_NVM_MGMT_UPDATE_DISABLED 0x2
+
bool mgmt_cem;
bool ieee_1588;
bool iwarp;
@@ -377,6 +397,9 @@ struct i40e_hw_capabilities {
u32 enabled_tcmap;
u32 maxtc;
u64 wr_csr_prot;
+ bool apm_wol_support;
+ enum i40e_acpi_programming_method acpi_prog_method;
+ bool proxy_support;
};
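The three *_MASK defines earlier in this hunk describe how the new apm_wol_support, acpi_prog_method, and proxy_support fields are expected to be packed into a single capability word; a sketch of the decode, assuming the flags value arrives in a device-capability element:

    static void
    ixl_decode_wol_proxy_caps(struct i40e_hw_capabilities *caps, u32 flags)
    {
            caps->apm_wol_support = (flags & I40E_WOL_SUPPORT_MASK) != 0;
            caps->acpi_prog_method =
                (flags & I40E_ACPI_PROGRAMMING_METHOD_MASK) ?
                I40E_ACPI_PROGRAMMING_METHOD_AQC_FPK :
                I40E_ACPI_PROGRAMMING_METHOD_HW_FVL;
            caps->proxy_support = (flags & I40E_PROXY_SUPPORT_MASK) != 0;
    }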
struct i40e_mac_info {
@@ -622,6 +645,8 @@ struct i40e_hw {
enum i40e_nvmupd_state nvmupd_state;
struct i40e_aq_desc nvm_wb_desc;
struct i40e_virt_mem nvm_buff;
+ bool nvm_release_on_done;
+ u16 nvm_wait_opcode;
/* HMC info */
struct i40e_hmc_info hmc; /* HMC info struct */
@@ -634,6 +659,10 @@ struct i40e_hw {
struct i40e_dcbx_config remote_dcbx_config; /* Peer Cfg */
struct i40e_dcbx_config desired_dcbx_config; /* CEE Desired Cfg */
+ /* WoL and proxy support */
+ u16 num_wol_proxy_filters;
+ u16 wol_proxy_vsi_seid;
+
#define I40E_HW_FLAG_AQ_SRCTL_ACCESS_ENABLE BIT_ULL(0)
u64 flags;
@@ -644,7 +673,8 @@ struct i40e_hw {
static INLINE bool i40e_is_vf(struct i40e_hw *hw)
{
- return hw->mac.type == I40E_MAC_VF;
+ return (hw->mac.type == I40E_MAC_VF ||
+ hw->mac.type == I40E_MAC_X722_VF);
}
struct i40e_driver_version {
@@ -748,7 +778,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_CRCP_SHIFT = 4,
I40E_RX_DESC_STATUS_TSYNINDX_SHIFT = 5, /* 2 BITS */
I40E_RX_DESC_STATUS_TSYNVALID_SHIFT = 7,
- I40E_RX_DESC_STATUS_RESERVED1_SHIFT = 8,
+ I40E_RX_DESC_STATUS_EXT_UDP_0_SHIFT = 8,
I40E_RX_DESC_STATUS_UMBCAST_SHIFT = 9, /* 2 BITS */
I40E_RX_DESC_STATUS_FLM_SHIFT = 11,
@@ -756,7 +786,7 @@ enum i40e_rx_desc_status_bits {
I40E_RX_DESC_STATUS_LPBK_SHIFT = 14,
I40E_RX_DESC_STATUS_IPV6EXADD_SHIFT = 15,
I40E_RX_DESC_STATUS_RESERVED2_SHIFT = 16, /* 2 BITS */
- I40E_RX_DESC_STATUS_UDP_0_SHIFT = 18,
+ I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT = 18,
I40E_RX_DESC_STATUS_LAST /* this entry must be last!!! */
};
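The two renames split the UDP checksum status into the outer (EXT) and inner (INT) bits that X722 reports for tunneled packets. A sketch of testing the inner bit, assuming the I40E_RXD_QW1_STATUS_SHIFT/_MASK accessors defined elsewhere in this header:

    /* Sketch: extract the status field from Rx descriptor qword1
     * and test the inner-UDP checksum bit. */
    static inline bool
    ixl_rx_inner_udp_valid(u64 qword1)
    {
            u32 status = (u32)((qword1 & I40E_RXD_QW1_STATUS_MASK) >>
                I40E_RXD_QW1_STATUS_SHIFT);

            return ((status & BIT(I40E_RX_DESC_STATUS_INT_UDP_0_SHIFT)) != 0);
    }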
@@ -1134,6 +1164,8 @@ enum i40e_tx_ctx_desc_eipt_offload {
#define I40E_TXD_CTX_QW0_DECTTL_MASK (0xFULL << \
I40E_TXD_CTX_QW0_DECTTL_SHIFT)
+#define I40E_TXD_CTX_QW0_L4T_CS_SHIFT 23
+#define I40E_TXD_CTX_QW0_L4T_CS_MASK BIT_ULL(I40E_TXD_CTX_QW0_L4T_CS_SHIFT)
struct i40e_nop_desc {
__le64 rsvd;
__le64 dtype_cmd;
@@ -1170,15 +1202,24 @@ struct i40e_filter_program_desc {
/* Packet Classifier Types for filters */
enum i40e_filter_pctype {
- /* Note: Values 0-30 are reserved for future use */
+ /* Note: Values 0-28 are reserved for future use.
+	 * Values 29, 30, and 32 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP = 29,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP = 30,
I40E_FILTER_PCTYPE_NONF_IPV4_UDP = 31,
- /* Note: Value 32 is reserved for future use */
+ I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK = 32,
I40E_FILTER_PCTYPE_NONF_IPV4_TCP = 33,
I40E_FILTER_PCTYPE_NONF_IPV4_SCTP = 34,
I40E_FILTER_PCTYPE_NONF_IPV4_OTHER = 35,
I40E_FILTER_PCTYPE_FRAG_IPV4 = 36,
- /* Note: Values 37-40 are reserved for future use */
+ /* Note: Values 37-38 are reserved for future use.
+	 * Values 39, 40, and 42 are not supported on XL710 and X710.
+ */
+ I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP = 39,
+ I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP = 40,
I40E_FILTER_PCTYPE_NONF_IPV6_UDP = 41,
+ I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK = 42,
I40E_FILTER_PCTYPE_NONF_IPV6_TCP = 43,
I40E_FILTER_PCTYPE_NONF_IPV6_SCTP = 44,
I40E_FILTER_PCTYPE_NONF_IPV6_OTHER = 45,
@@ -1234,6 +1275,10 @@ enum i40e_filter_program_desc_pcmd {
#define I40E_TXD_FLTR_QW1_FD_STATUS_MASK (0x3ULL << \
I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_SHIFT (0xEULL + \
+ I40E_TXD_FLTR_QW1_CMD_SHIFT)
+#define I40E_TXD_FLTR_QW1_ATR_MASK BIT_ULL(I40E_TXD_FLTR_QW1_ATR_SHIFT)
+
#define I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT 20
#define I40E_TXD_FLTR_QW1_CNTINDEX_MASK (0x1FFUL << \
I40E_TXD_FLTR_QW1_CNTINDEX_SHIFT)
@@ -1544,4 +1589,37 @@ struct i40e_lldp_variables {
/* RSS Hash Table Size */
#define I40E_PFQF_CTL_0_HASHLUTSIZE_512 0x00010000
+
+/* INPUT SET MASK for RSS, flow director, and flexible payload */
+#define I40E_L3_SRC_SHIFT 47
+#define I40E_L3_SRC_MASK (0x3ULL << I40E_L3_SRC_SHIFT)
+#define I40E_L3_V6_SRC_SHIFT 43
+#define I40E_L3_V6_SRC_MASK (0xFFULL << I40E_L3_V6_SRC_SHIFT)
+#define I40E_L3_DST_SHIFT 35
+#define I40E_L3_DST_MASK (0x3ULL << I40E_L3_DST_SHIFT)
+#define I40E_L3_V6_DST_SHIFT 35
+#define I40E_L3_V6_DST_MASK (0xFFULL << I40E_L3_V6_DST_SHIFT)
+#define I40E_L4_SRC_SHIFT 34
+#define I40E_L4_SRC_MASK (0x1ULL << I40E_L4_SRC_SHIFT)
+#define I40E_L4_DST_SHIFT 33
+#define I40E_L4_DST_MASK (0x1ULL << I40E_L4_DST_SHIFT)
+#define I40E_VERIFY_TAG_SHIFT 31
+#define I40E_VERIFY_TAG_MASK (0x3ULL << I40E_VERIFY_TAG_SHIFT)
+
+#define I40E_FLEX_50_SHIFT 13
+#define I40E_FLEX_50_MASK (0x1ULL << I40E_FLEX_50_SHIFT)
+#define I40E_FLEX_51_SHIFT 12
+#define I40E_FLEX_51_MASK (0x1ULL << I40E_FLEX_51_SHIFT)
+#define I40E_FLEX_52_SHIFT 11
+#define I40E_FLEX_52_MASK (0x1ULL << I40E_FLEX_52_SHIFT)
+#define I40E_FLEX_53_SHIFT 10
+#define I40E_FLEX_53_MASK (0x1ULL << I40E_FLEX_53_SHIFT)
+#define I40E_FLEX_54_SHIFT 9
+#define I40E_FLEX_54_MASK (0x1ULL << I40E_FLEX_54_SHIFT)
+#define I40E_FLEX_55_SHIFT 8
+#define I40E_FLEX_55_MASK (0x1ULL << I40E_FLEX_55_SHIFT)
+#define I40E_FLEX_56_SHIFT 7
+#define I40E_FLEX_56_MASK (0x1ULL << I40E_FLEX_56_SHIFT)
+#define I40E_FLEX_57_SHIFT 6
+#define I40E_FLEX_57_MASK (0x1ULL << I40E_FLEX_57_SHIFT)
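Each bit above selects one packet field for the 64-bit input set that feeds RSS, flow director, and flexible-payload matching. As a sketch, an input set that hashes on IPv4 source/destination addresses plus both L4 ports would be composed like this (writing it into the GLQF inset registers is a separate step):

    /* Sketch: hash on L3 src/dst and L4 src/dst; flex bits stay clear. */
    u64 input_set = I40E_L3_SRC_MASK | I40E_L3_DST_MASK |
        I40E_L4_SRC_MASK | I40E_L4_DST_MASK;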
#endif /* _I40E_TYPE_H_ */
diff --git a/sys/dev/ixl/i40e_virtchnl.h b/sys/dev/ixl/i40e_virtchnl.h
index 7939a0dc5732..4ebe578d1972 100644
--- a/sys/dev/ixl/i40e_virtchnl.h
+++ b/sys/dev/ixl/i40e_virtchnl.h
@@ -88,7 +88,12 @@ enum i40e_virtchnl_ops {
I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE = 14,
I40E_VIRTCHNL_OP_GET_STATS = 15,
I40E_VIRTCHNL_OP_FCOE = 16,
- I40E_VIRTCHNL_OP_EVENT = 17,
+ I40E_VIRTCHNL_OP_EVENT = 17, /* must ALWAYS be 17 */
+ I40E_VIRTCHNL_OP_CONFIG_RSS_KEY = 23,
+ I40E_VIRTCHNL_OP_CONFIG_RSS_LUT = 24,
+ I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS = 25,
+ I40E_VIRTCHNL_OP_SET_RSS_HENA = 26,
+
};
/* Virtual channel message descriptor. This overlays the admin queue
@@ -162,6 +167,7 @@ struct i40e_virtchnl_vsi_resource {
#define I40E_VIRTCHNL_VF_OFFLOAD_VLAN 0x00010000
#define I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING 0x00020000
#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2 0x00040000
+#define I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF	0x00080000
struct i40e_virtchnl_vf_resource {
u16 num_vsis;
@@ -170,8 +176,8 @@ struct i40e_virtchnl_vf_resource {
u16 max_mtu;
u32 vf_offload_flags;
- u32 max_fcoe_contexts;
- u32 max_fcoe_filters;
+ u32 rss_key_size;
+ u32 rss_lut_size;
struct i40e_virtchnl_vsi_resource vsi_res[1];
};
@@ -330,6 +336,39 @@ struct i40e_virtchnl_promisc_info {
* PF replies with struct i40e_eth_stats in an external buffer.
*/
+/* I40E_VIRTCHNL_OP_CONFIG_RSS_KEY
+ * I40E_VIRTCHNL_OP_CONFIG_RSS_LUT
+ * VF sends these messages to configure RSS. Only supported if both PF
+ * and VF drivers set the I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF bit during
+ * configuration negotiation. If this is the case, then the RSS fields in
+ * the VF resource struct are valid.
+ * Both the key and LUT are initialized to 0 by the PF, meaning that
+ * RSS is effectively disabled until set up by the VF.
+ */
+struct i40e_virtchnl_rss_key {
+ u16 vsi_id;
+ u16 key_len;
+ u8 key[1]; /* RSS hash key, packed bytes */
+};
+
+struct i40e_virtchnl_rss_lut {
+ u16 vsi_id;
+ u16 lut_entries;
+	u8 lut[1];		/* RSS lookup table */
+};
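Both structs end in a one-byte array acting as a pre-C99 flexible array member, so a message carrying an N-byte key or LUT is sized as sizeof(struct) + N - 1. A hedged sketch of building the key message on the VF side (the helper and its send path are hypothetical):

    static struct i40e_virtchnl_rss_key *
    ixl_vf_build_rss_key_msg(u16 vsi_id, const u8 *key, u16 key_len,
        u16 *msg_len)
    {
            struct i40e_virtchnl_rss_key *msg;

            *msg_len = sizeof(struct i40e_virtchnl_rss_key) + key_len - 1;
            msg = malloc(*msg_len, M_DEVBUF, M_WAITOK | M_ZERO);
            msg->vsi_id = vsi_id;
            msg->key_len = key_len;
            memcpy(msg->key, key, key_len);
            /* caller sends *msg_len bytes with
             * I40E_VIRTCHNL_OP_CONFIG_RSS_KEY */
            return (msg);
    }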
+
+/* I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS
+ * I40E_VIRTCHNL_OP_SET_RSS_HENA
+ * VF sends these messages to get and set the hash filter enable bits for RSS.
+ * By default, the PF sets these to all possible traffic types that the
+ * hardware supports. The VF can query this value if it wants to change the
+ * traffic types that are hashed by the hardware.
+ * Traffic types are defined in the i40e_filter_pctype enum in i40e_type.h
+ */
+struct i40e_virtchnl_rss_hena {
+ u64 hena;
+};
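Each set bit in hena is indexed by a value from the i40e_filter_pctype enum in i40e_type.h, so restricting hashing to plain IPv4 TCP and UDP would look like this sketch:

    struct i40e_virtchnl_rss_hena hena;

    hena.hena = BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
        BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
    /* ...sent to the PF with I40E_VIRTCHNL_OP_SET_RSS_HENA... */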
+
/* I40E_VIRTCHNL_OP_EVENT
* PF sends this message to inform the VF driver of events that may affect it.
* No direct response is expected from the VF, though it may generate other
diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c
index 8e9ba80987ae..a9221d323a26 100644
--- a/sys/dev/ixl/if_ixl.c
+++ b/sys/dev/ixl/if_ixl.c
@@ -32,23 +32,17 @@
******************************************************************************/
/*$FreeBSD$*/
-#ifndef IXL_STANDALONE_BUILD
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-#endif
-
#include "ixl.h"
#include "ixl_pf.h"
-#ifdef RSS
-#include <net/rss_config.h>
+#ifdef PCI_IOV
+#include "ixl_pf_iov.h"
#endif
/*********************************************************************
* Driver version
*********************************************************************/
-char ixl_driver_version[] = "1.4.27-k";
+char ixl_driver_version[] = "1.6.6-k";
/*********************************************************************
* PCI Device ID Table
@@ -70,6 +64,12 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T_X722, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_I_X722, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -79,7 +79,7 @@ static ixl_vendor_info_t ixl_vendor_info_array[] =
*********************************************************************/
static char *ixl_strings[] = {
- "Intel(R) Ethernet Connection XL710 Driver"
+ "Intel(R) Ethernet Connection XL710/X722 Driver"
};
@@ -90,146 +90,9 @@ static int ixl_probe(device_t);
static int ixl_attach(device_t);
static int ixl_detach(device_t);
static int ixl_shutdown(device_t);
-static int ixl_get_hw_capabilities(struct ixl_pf *);
-static void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
-static int ixl_ioctl(struct ifnet *, u_long, caddr_t);
-static void ixl_init(void *);
-static void ixl_init_locked(struct ixl_pf *);
-static void ixl_stop(struct ixl_pf *);
-static void ixl_stop_locked(struct ixl_pf *);
-static void ixl_media_status(struct ifnet *, struct ifmediareq *);
-static int ixl_media_change(struct ifnet *);
-static void ixl_update_link_status(struct ixl_pf *);
-static int ixl_allocate_pci_resources(struct ixl_pf *);
-static u16 ixl_get_bus_info(struct i40e_hw *, device_t);
-static int ixl_setup_stations(struct ixl_pf *);
-static int ixl_switch_config(struct ixl_pf *);
-static int ixl_initialize_vsi(struct ixl_vsi *);
-
-static int ixl_setup_adminq_msix(struct ixl_pf *);
-static int ixl_setup_adminq_tq(struct ixl_pf *);
-static int ixl_setup_queue_msix(struct ixl_vsi *);
-static int ixl_setup_queue_tqs(struct ixl_vsi *);
-static int ixl_teardown_adminq_msix(struct ixl_pf *);
-static int ixl_teardown_queue_msix(struct ixl_vsi *);
-static void ixl_configure_intr0_msix(struct ixl_pf *);
-static void ixl_configure_queue_intr_msix(struct ixl_pf *);
-static void ixl_free_queue_tqs(struct ixl_vsi *);
-static void ixl_free_adminq_tq(struct ixl_pf *);
-
-static int ixl_assign_vsi_legacy(struct ixl_pf *);
-static int ixl_init_msix(struct ixl_pf *);
-static void ixl_configure_itr(struct ixl_pf *);
-static void ixl_configure_legacy(struct ixl_pf *);
-static void ixl_free_pci_resources(struct ixl_pf *);
-static void ixl_local_timer(void *);
-static int ixl_setup_interface(device_t, struct ixl_vsi *);
-static void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
-static void ixl_config_rss(struct ixl_vsi *);
-static void ixl_set_queue_rx_itr(struct ixl_queue *);
-static void ixl_set_queue_tx_itr(struct ixl_queue *);
-static int ixl_set_advertised_speeds(struct ixl_pf *, int);
-static void ixl_get_initial_advertised_speeds(struct ixl_pf *);
-
-static int ixl_enable_rings(struct ixl_vsi *);
-static int ixl_disable_rings(struct ixl_vsi *);
-static void ixl_enable_intr(struct ixl_vsi *);
-static void ixl_disable_intr(struct ixl_vsi *);
-static void ixl_disable_rings_intr(struct ixl_vsi *);
-
-static void ixl_enable_adminq(struct i40e_hw *);
-static void ixl_disable_adminq(struct i40e_hw *);
-static void ixl_enable_queue(struct i40e_hw *, int);
-static void ixl_disable_queue(struct i40e_hw *, int);
-static void ixl_enable_legacy(struct i40e_hw *);
-static void ixl_disable_legacy(struct i40e_hw *);
-
-static void ixl_set_promisc(struct ixl_vsi *);
-static void ixl_add_multi(struct ixl_vsi *);
-static void ixl_del_multi(struct ixl_vsi *);
-static void ixl_register_vlan(void *, struct ifnet *, u16);
-static void ixl_unregister_vlan(void *, struct ifnet *, u16);
-static void ixl_setup_vlan_filters(struct ixl_vsi *);
-
-static void ixl_init_filters(struct ixl_vsi *);
-static void ixl_reconfigure_filters(struct ixl_vsi *vsi);
-static void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
-static void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
-static void ixl_add_hw_filters(struct ixl_vsi *, int, int);
-static void ixl_del_hw_filters(struct ixl_vsi *, int);
-static struct ixl_mac_filter *
- ixl_find_filter(struct ixl_vsi *, u8 *, s16);
-static void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
-static void ixl_free_mac_filters(struct ixl_vsi *vsi);
-/* Sysctls*/
-static void ixl_add_device_sysctls(struct ixl_pf *);
-
-static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
-static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
-static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
-
-#ifdef IXL_DEBUG_SYSCTL
-static int ixl_debug_info(SYSCTL_HANDLER_ARGS);
-static void ixl_print_debug_info(struct ixl_pf *);
-
-static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
-static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
-#endif
-
-/* The MSI/X Interrupt handlers */
-static void ixl_intr(void *);
-static void ixl_msix_que(void *);
-static void ixl_msix_adminq(void *);
-static void ixl_handle_mdd_event(struct ixl_pf *);
-
-/* Deferred interrupt tasklets */
-static void ixl_do_adminq(void *, int);
-
-/* Statistics */
-static void ixl_add_hw_stats(struct ixl_pf *);
-static void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
- struct sysctl_oid_list *, struct i40e_hw_port_stats *);
-static void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
- struct sysctl_oid_list *,
- struct i40e_eth_stats *);
-static void ixl_update_stats_counters(struct ixl_pf *);
-static void ixl_update_eth_stats(struct ixl_vsi *);
-static void ixl_update_vsi_stats(struct ixl_vsi *);
-static void ixl_pf_reset_stats(struct ixl_pf *);
-static void ixl_vsi_reset_stats(struct ixl_vsi *);
-static void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
- u64 *, u64 *);
-static void ixl_stat_update32(struct i40e_hw *, u32, bool,
- u64 *, u64 *);
-/* NVM update */
-static int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
-static void ixl_handle_empr_reset(struct ixl_pf *);
-static int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
-
-/* Debug helper functions */
-#ifdef IXL_DEBUG
-static void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
-#endif
-
-#ifdef PCI_IOV
-static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
-
-static int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t*);
-static void ixl_iov_uninit(device_t dev);
-static int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t*);
-
-static void ixl_handle_vf_msg(struct ixl_pf *,
- struct i40e_arq_event_info *);
-static void ixl_handle_vflr(void *arg, int pending);
-
-static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
-static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
-#endif
+static int ixl_save_pf_tunables(struct ixl_pf *);
+static int ixl_attach_get_link_status(struct ixl_pf *);
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -263,11 +126,6 @@ MODULE_DEPEND(ixl, netmap, 1, 1, 1);
#endif /* DEV_NETMAP */
/*
-** Global reset mutex
-*/
-static struct mtx ixl_reset_mtx;
-
-/*
** TUNEABLE PARAMETERS:
*/
@@ -287,73 +145,72 @@ SYSCTL_INT(_hw_ixl, OID_AUTO, enable_msix, CTLFLAG_RDTUN, &ixl_enable_msix, 0,
** Number of descriptors per ring:
** - TX and RX are the same size
*/
-static int ixl_ringsz = DEFAULT_RING;
-TUNABLE_INT("hw.ixl.ringsz", &ixl_ringsz);
+static int ixl_ring_size = DEFAULT_RING;
+TUNABLE_INT("hw.ixl.ring_size", &ixl_ring_size);
SYSCTL_INT(_hw_ixl, OID_AUTO, ring_size, CTLFLAG_RDTUN,
- &ixl_ringsz, 0, "Descriptor Ring Size");
+ &ixl_ring_size, 0, "Descriptor Ring Size");
/*
** This can be set manually; if left as 0, the
** number of queues will be calculated based
** on the CPUs and MSI-X vectors available.
*/
-int ixl_max_queues = 0;
+static int ixl_max_queues = 0;
TUNABLE_INT("hw.ixl.max_queues", &ixl_max_queues);
SYSCTL_INT(_hw_ixl, OID_AUTO, max_queues, CTLFLAG_RDTUN,
&ixl_max_queues, 0, "Number of Queues");
+static int ixl_enable_tx_fc_filter = 1;
+TUNABLE_INT("hw.ixl.enable_tx_fc_filter",
+ &ixl_enable_tx_fc_filter);
+SYSCTL_INT(_hw_ixl, OID_AUTO, enable_tx_fc_filter, CTLFLAG_RDTUN,
+ &ixl_enable_tx_fc_filter, 0,
+ "Filter out packets with Ethertype 0x8808 from being sent out by non-HW sources");
+
+static int ixl_core_debug_mask = 0;
+TUNABLE_INT("hw.ixl.core_debug_mask",
+ &ixl_core_debug_mask);
+SYSCTL_INT(_hw_ixl, OID_AUTO, core_debug_mask, CTLFLAG_RDTUN,
+ &ixl_core_debug_mask, 0,
+ "Display debug statements that are printed in non-shared code");
+
+static int ixl_shared_debug_mask = 0;
+TUNABLE_INT("hw.ixl.shared_debug_mask",
+ &ixl_shared_debug_mask);
+SYSCTL_INT(_hw_ixl, OID_AUTO, shared_debug_mask, CTLFLAG_RDTUN,
+ &ixl_shared_debug_mask, 0,
+ "Display debug statements that are printed in shared code");
+
/*
** Controls for Interrupt Throttling
** - true/false for dynamic adjustment
** - default values for static ITR
*/
-int ixl_dynamic_rx_itr = 1;
+static int ixl_dynamic_rx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_rx_itr", &ixl_dynamic_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_rx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_rx_itr, 0, "Dynamic RX Interrupt Rate");
-int ixl_dynamic_tx_itr = 1;
+static int ixl_dynamic_tx_itr = 1;
TUNABLE_INT("hw.ixl.dynamic_tx_itr", &ixl_dynamic_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, dynamic_tx_itr, CTLFLAG_RDTUN,
&ixl_dynamic_tx_itr, 0, "Dynamic TX Interrupt Rate");
-int ixl_rx_itr = IXL_ITR_8K;
+static int ixl_rx_itr = IXL_ITR_8K;
TUNABLE_INT("hw.ixl.rx_itr", &ixl_rx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, rx_itr, CTLFLAG_RDTUN,
&ixl_rx_itr, 0, "RX Interrupt Rate");
-int ixl_tx_itr = IXL_ITR_4K;
+static int ixl_tx_itr = IXL_ITR_4K;
TUNABLE_INT("hw.ixl.tx_itr", &ixl_tx_itr);
SYSCTL_INT(_hw_ixl, OID_AUTO, tx_itr, CTLFLAG_RDTUN,
&ixl_tx_itr, 0, "TX Interrupt Rate");
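All of the tunables above are CTLFLAG_RDTUN sysctls, so they are read once at boot rather than at runtime; for example, a line such as hw.ixl.ring_size="2048" in /boot/loader.conf would (assuming the value passes the range check in ixl_save_pf_tunables() later in this patch) take effect on the next reboot.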
-#ifdef IXL_FDIR
-static int ixl_enable_fdir = 1;
-TUNABLE_INT("hw.ixl.enable_fdir", &ixl_enable_fdir);
-/* Rate at which we sample */
-int ixl_atr_rate = 20;
-TUNABLE_INT("hw.ixl.atr_rate", &ixl_atr_rate);
-#endif
-
#ifdef DEV_NETMAP
#define NETMAP_IXL_MAIN /* only bring in one part of the netmap code */
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
-static char *ixl_fc_string[6] = {
- "None",
- "Rx",
- "Tx",
- "Full",
- "Priority",
- "Default"
-};
-
-static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
-
-static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
- {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
-
/*********************************************************************
* Device identification routine
*
@@ -371,7 +228,6 @@ ixl_probe(device_t dev)
u16 pci_vendor_id, pci_device_id;
u16 pci_subvendor_id, pci_subdevice_id;
char device_name[256];
- static bool lock_init = FALSE;
#if 0
INIT_DEBUGOUT("ixl_probe: begin");
@@ -398,13 +254,6 @@ ixl_probe(device_t dev)
ixl_strings[ent->index],
ixl_driver_version);
device_set_desc_copy(dev, device_name);
- /* One shot mutex init */
- if (lock_init == FALSE) {
- lock_init = TRUE;
- mtx_init(&ixl_reset_mtx,
- "ixl_reset",
- "IXL RESET Lock", MTX_DEF);
- }
return (BUS_PROBE_DEFAULT);
}
ent++;
@@ -412,6 +261,64 @@ ixl_probe(device_t dev)
return (ENXIO);
}
+static int
+ixl_attach_get_link_status(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "link restart failed, aq_err=%d\n",
+ pf->hw.aq.asq_last_status);
+			return (error);
+ }
+ }
+
+ /* Determine link state */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ return (0);
+}
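The firmware check above (anything older than 4.33 needs a link restart) recurs verbatim in ixl_reset() further down; a hypothetical helper that could capture it, shown only as a sketch:

    /* Hypothetical helper (not in this patch): true when the
     * pre-4.33 firmware AN-restart workaround applies. */
    static inline bool
    ixl_fw_needs_an_restart(struct i40e_hw *hw)
    {
    	return (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
    	    (hw->aq.fw_maj_ver < 4));
    }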
+
+/*
+ * Sanity check and save off tunable values.
+ */
+static int
+ixl_save_pf_tunables(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+
+ /* Save tunable information */
+ pf->enable_msix = ixl_enable_msix;
+ pf->max_queues = ixl_max_queues;
+ pf->ringsz = ixl_ring_size;
+ pf->enable_tx_fc_filter = ixl_enable_tx_fc_filter;
+ pf->dynamic_rx_itr = ixl_dynamic_rx_itr;
+ pf->dynamic_tx_itr = ixl_dynamic_tx_itr;
+ pf->tx_itr = ixl_tx_itr;
+ pf->rx_itr = ixl_rx_itr;
+ pf->dbg_mask = ixl_core_debug_mask;
+ pf->hw.debug_mask = ixl_shared_debug_mask;
+
+	if (ixl_ring_size < IXL_MIN_RING ||
+	    ixl_ring_size > IXL_MAX_RING ||
+	    ixl_ring_size % IXL_RING_INCREMENT != 0) {
+ device_printf(dev, "Invalid ring_size value of %d set!\n",
+ ixl_ring_size);
+ device_printf(dev, "ring_size must be between %d and %d, "
+ "inclusive, and must be a multiple of %d\n",
+ IXL_MIN_RING, IXL_MAX_RING, IXL_RING_INCREMENT);
+ return (EINVAL);
+ }
+
+ return (0);
+}
+
/*********************************************************************
* Device initialization routine
*
@@ -428,12 +335,8 @@ ixl_attach(device_t dev)
struct ixl_pf *pf;
struct i40e_hw *hw;
struct ixl_vsi *vsi;
- u16 bus;
+ enum i40e_status_code status;
int error = 0;
-#ifdef PCI_IOV
- nvlist_t *pf_schema, *vf_schema;
- int iov_error;
-#endif
INIT_DEBUGOUT("ixl_attach: begin");
@@ -449,26 +352,17 @@ ixl_attach(device_t dev)
vsi = &pf->vsi;
vsi->dev = pf->dev;
+ /* Save tunable values */
+ error = ixl_save_pf_tunables(pf);
+ if (error)
+ return (error);
+
/* Core Lock Init*/
IXL_PF_LOCK_INIT(pf, device_get_nameunit(dev));
/* Set up the timer callout */
callout_init_mtx(&pf->timer, &pf->pf_mtx, 0);
- /* Save off the PCI information */
- hw->vendor_id = pci_get_vendor(dev);
- hw->device_id = pci_get_device(dev);
- hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
- hw->subsystem_vendor_id =
- pci_read_config(dev, PCIR_SUBVEND_0, 2);
- hw->subsystem_device_id =
- pci_read_config(dev, PCIR_SUBDEV_0, 2);
-
- hw->bus.device = pci_get_slot(dev);
- hw->bus.func = pci_get_function(dev);
-
- pf->vc_debug_lvl = 1;
-
/* Do PCI setup - map BAR0, etc */
if (ixl_allocate_pci_resources(pf)) {
device_printf(dev, "Allocation of PCI resources failed\n");
@@ -478,42 +372,45 @@ ixl_attach(device_t dev)
/* Establish a clean starting point */
i40e_clear_hw(hw);
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "PF reset failure %d\n", error);
+ status = i40e_pf_reset(hw);
+ if (status) {
+ device_printf(dev, "PF reset failure %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
- /* Set admin queue parameters */
- hw->aq.num_arq_entries = IXL_AQ_LEN;
- hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
- hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
-
- /* Initialize mac filter list for VSI */
- SLIST_INIT(&vsi->ftl);
-
/* Initialize the shared code */
- error = i40e_init_shared_code(hw);
- if (error) {
- device_printf(dev, "Unable to initialize shared code, error %d\n",
- error);
+ status = i40e_init_shared_code(hw);
+ if (status) {
+ device_printf(dev, "Unable to initialize shared code, error %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
+ /*
+ * Allocate interrupts and figure out number of queues to use
+ * for PF interface
+ */
+ pf->msix = ixl_init_msix(pf);
+
/* Set up the admin queue */
- error = i40e_init_adminq(hw);
- if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
- device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
- error);
+ hw->aq.num_arq_entries = IXL_AQ_LEN;
+ hw->aq.num_asq_entries = IXL_AQ_LEN;
+ hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
+
+ status = i40e_init_adminq(hw);
+ if (status != 0 && status != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %s\n",
+ i40e_stat_str(hw, status));
error = EIO;
goto err_out;
}
ixl_print_nvm_version(pf);
- if (error == I40E_ERR_FIRMWARE_API_VERSION) {
+ if (status == I40E_ERR_FIRMWARE_API_VERSION) {
device_printf(dev, "The driver for the device stopped "
"because the NVM image is newer than expected.\n"
"You must install the most recent version of "
@@ -544,24 +441,44 @@ ixl_attach(device_t dev)
}
/* Set up host memory cache */
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ status = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ if (status) {
+ device_printf(dev, "init_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
goto err_get_cap;
}
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ status = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (status) {
+ device_printf(dev, "configure_lan_hmc failed: %s\n",
+ i40e_stat_str(hw, status));
+ goto err_mac_hmc;
+ }
+
+ /* Init queue allocation manager */
+ error = ixl_pf_qmgr_init(&pf->qmgr, hw->func_caps.num_tx_qp);
+ if (error) {
+ device_printf(dev, "Failed to init queue manager for PF queues, error %d\n",
+ error);
+ goto err_mac_hmc;
+ }
+	/* Reserve a contiguous allocation for the PF's VSI */
+ error = ixl_pf_qmgr_alloc_contiguous(&pf->qmgr, vsi->num_queues, &pf->qtag);
if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ device_printf(dev, "Failed to reserve queues for PF LAN VSI, error %d\n",
+ error);
goto err_mac_hmc;
}
+ device_printf(dev, "Allocating %d queues for PF LAN VSI; %d queues active\n",
+ pf->qtag.num_allocated, pf->qtag.num_active);
/* Disable LLDP from the firmware for certain NVM versions */
if (((pf->hw.aq.fw_maj_ver == 4) && (pf->hw.aq.fw_min_ver < 3)) ||
(pf->hw.aq.fw_maj_ver < 4))
i40e_aq_stop_lldp(hw, TRUE, NULL);
+ /* Get MAC addresses from hardware */
i40e_get_mac_addr(hw, hw->mac.addr);
error = i40e_validate_mac_addr(hw->mac.addr);
if (error) {
@@ -571,35 +488,29 @@ ixl_attach(device_t dev)
bcopy(hw->mac.addr, hw->mac.perm_addr, ETHER_ADDR_LEN);
i40e_get_port_mac_addr(hw, hw->mac.port_addr);
- /* Set up VSI and queues */
- if (ixl_setup_stations(pf) != 0) {
+ /* Initialize mac filter list for VSI */
+ SLIST_INIT(&vsi->ftl);
+
+ /* Set up SW VSI and allocate queue memory and rings */
+ if (ixl_setup_stations(pf)) {
device_printf(dev, "setup stations failed!\n");
error = ENOMEM;
goto err_mac_hmc;
}
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "link restart failed, aq_err=%d\n",
- pf->hw.aq.asq_last_status);
- goto err_late;
- }
- }
-
- /* Determine link state */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
/* Setup OS network interface / ifnet */
- if (ixl_setup_interface(dev, vsi) != 0) {
+ if (ixl_setup_interface(dev, vsi)) {
device_printf(dev, "interface setup failed!\n");
error = EIO;
goto err_late;
}
+ /* Determine link state */
+ if (ixl_attach_get_link_status(pf)) {
+ error = EINVAL;
+ goto err_late;
+ }
+
error = ixl_switch_config(pf);
if (error) {
device_printf(dev, "Initial ixl_switch_config() failed: %d\n",
@@ -608,17 +519,17 @@ ixl_attach(device_t dev)
}
/* Limit PHY interrupts to link, autoneg, and modules failure */
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
+ status = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
NULL);
- if (error) {
- device_printf(dev, "i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
+ if (status) {
+ device_printf(dev, "i40e_aq_set_phy_mask() failed: err %s,"
+ " aq_err %s\n", i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
goto err_late;
}
/* Get the bus configuration and set the shared code's config */
- bus = ixl_get_bus_info(hw, dev);
- i40e_set_pci_config_data(hw, bus);
+ ixl_get_bus_info(hw, dev);
/*
* In MSI-X mode, initialize the Admin Queue interrupt,
@@ -656,26 +567,7 @@ ixl_attach(device_t dev)
ixl_unregister_vlan, vsi, EVENTHANDLER_PRI_FIRST);
#ifdef PCI_IOV
- /* SR-IOV is only supported when MSI-X is in use. */
- if (pf->msix > 1) {
- pf_schema = pci_iov_schema_alloc_node();
- vf_schema = pci_iov_schema_alloc_node();
- pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
- pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
- IOV_SCHEMA_HASDEFAULT, TRUE);
- pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
- IOV_SCHEMA_HASDEFAULT, FALSE);
- pci_iov_schema_add_bool(vf_schema, "allow-promisc",
- IOV_SCHEMA_HASDEFAULT, FALSE);
-
- iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
- if (iov_error != 0) {
- device_printf(dev,
- "Failed to initialize SR-IOV (error=%d)\n",
- iov_error);
- } else
- device_printf(dev, "SR-IOV ready\n");
- }
+ ixl_initialize_sriov(pf);
#endif
#ifdef DEV_NETMAP
@@ -685,8 +577,10 @@ ixl_attach(device_t dev)
return (0);
err_late:
- if (vsi->ifp != NULL)
+ if (vsi->ifp != NULL) {
+ ether_ifdetach(vsi->ifp);
if_free(vsi->ifp);
+ }
err_mac_hmc:
i40e_shutdown_lan_hmc(hw);
err_get_cap:
@@ -766,6 +660,7 @@ ixl_detach(device_t dev)
#ifdef DEV_NETMAP
netmap_detach(vsi->ifp);
#endif /* DEV_NETMAP */
+ ixl_pf_qmgr_destroy(&pf->qmgr);
ixl_free_pci_resources(pf);
bus_generic_detach(dev);
if_free(vsi->ifp);
@@ -788,6692 +683,3 @@ ixl_shutdown(device_t dev)
return (0);
}
-
-/*********************************************************************
- *
- * Get the hardware capabilities
- *
- **********************************************************************/
-
-static int
-ixl_get_hw_capabilities(struct ixl_pf *pf)
-{
- struct i40e_aqc_list_capabilities_element_resp *buf;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int error, len;
- u16 needed;
- bool again = TRUE;
-
- len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
-retry:
- if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
- malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate cap memory\n");
- return (ENOMEM);
- }
-
- /* This populates the hw struct */
- error = i40e_aq_discover_capabilities(hw, buf, len,
- &needed, i40e_aqc_opc_list_func_capabilities, NULL);
- free(buf, M_DEVBUF);
- if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
- (again == TRUE)) {
- /* retry once with a larger buffer */
- again = FALSE;
- len = needed;
- goto retry;
- } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
- device_printf(dev, "capability discovery failed: %d\n",
- pf->hw.aq.asq_last_status);
- return (ENODEV);
- }
-
- /* Capture this PF's starting queue pair */
- pf->qbase = hw->func_caps.base_queue;
-
-#ifdef IXL_DEBUG
- device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
- "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
- hw->pf_id, hw->func_caps.num_vfs,
- hw->func_caps.num_msix_vectors,
- hw->func_caps.num_msix_vectors_vf,
- hw->func_caps.fd_filters_guaranteed,
- hw->func_caps.fd_filters_best_effort,
- hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp,
- hw->func_caps.base_queue);
-#endif
- return (error);
-}
-
-static void
-ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
-{
- device_t dev = vsi->dev;
-
- /* Enable/disable TXCSUM/TSO4 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- ifp->if_capenable |= IFCAP_TXCSUM;
- /* enable TXCSUM, restore TSO if previously enabled */
- if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable |= IFCAP_TSO4;
- }
- }
- else if (mask & IFCAP_TSO4) {
- ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
- device_printf(dev,
- "TSO4 requires txcsum, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && !(ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM)
- ifp->if_capenable &= ~IFCAP_TXCSUM;
- else if (mask & IFCAP_TSO4)
- ifp->if_capenable |= IFCAP_TSO4;
- } else if((ifp->if_capenable & IFCAP_TXCSUM)
- && (ifp->if_capenable & IFCAP_TSO4)) {
- if (mask & IFCAP_TXCSUM) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO4;
- ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
- device_printf(dev,
- "TSO4 requires txcsum, disabling both...\n");
- } else if (mask & IFCAP_TSO4)
- ifp->if_capenable &= ~IFCAP_TSO4;
- }
-
- /* Enable/disable TXCSUM_IPV6/TSO6 */
- if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
- if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable |= IFCAP_TSO6;
- }
- } else if (mask & IFCAP_TSO6) {
- ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
- device_printf(dev,
- "TSO6 requires txcsum6, enabling both...\n");
- }
- } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && !(ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6)
- ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
- else if (mask & IFCAP_TSO6)
- ifp->if_capenable |= IFCAP_TSO6;
- } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- && (ifp->if_capenable & IFCAP_TSO6)) {
- if (mask & IFCAP_TXCSUM_IPV6) {
- vsi->flags |= IXL_FLAGS_KEEP_TSO6;
- ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
- device_printf(dev,
- "TSO6 requires txcsum6, disabling both...\n");
- } else if (mask & IFCAP_TSO6)
- ifp->if_capenable &= ~IFCAP_TSO6;
- }
-}
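The branches above all preserve a single invariant; a hedged restatement as a standalone predicate (helper name hypothetical, not part of the driver):

    /* Hypothetical predicate: TSO may only be enabled while the
     * matching TX checksum offload is enabled too. */
    static inline bool
    ixl_tso_caps_consistent(int capenable)
    {
    	if ((capenable & IFCAP_TSO4) && !(capenable & IFCAP_TXCSUM))
    		return (FALSE);
    	if ((capenable & IFCAP_TSO6) && !(capenable & IFCAP_TXCSUM_IPV6))
    		return (FALSE);
    	return (TRUE);
    }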
-
-/*********************************************************************
- * Ioctl entry point
- *
- * ixl_ioctl is called when the user wants to configure the
- * interface.
- *
- * return 0 on success, positive on failure
- **********************************************************************/
-
-static int
-ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct ifreq *ifr = (struct ifreq *)data;
- struct ifdrv *ifd = (struct ifdrv *)data;
-#if defined(INET) || defined(INET6)
- struct ifaddr *ifa = (struct ifaddr *)data;
- bool avoid_reset = FALSE;
-#endif
- int error = 0;
-
- switch (command) {
-
- case SIOCSIFADDR:
-#ifdef INET
- if (ifa->ifa_addr->sa_family == AF_INET)
- avoid_reset = TRUE;
-#endif
-#ifdef INET6
- if (ifa->ifa_addr->sa_family == AF_INET6)
- avoid_reset = TRUE;
-#endif
-#if defined(INET) || defined(INET6)
- /*
- ** Calling init results in link renegotiation,
- ** so we avoid doing it when possible.
- */
- if (avoid_reset) {
- ifp->if_flags |= IFF_UP;
- if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
- ixl_init(pf);
-#ifdef INET
- if (!(ifp->if_flags & IFF_NOARP))
- arp_ifinit(ifp, ifa);
-#endif
- } else
- error = ether_ioctl(ifp, command, data);
- break;
-#endif
- case SIOCSIFMTU:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
- if (ifr->ifr_mtu > IXL_MAX_FRAME -
- ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
- error = EINVAL;
- } else {
- IXL_PF_LOCK(pf);
- ifp->if_mtu = ifr->ifr_mtu;
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING)
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFFLAGS:
- IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
- IXL_PF_LOCK(pf);
- if (ifp->if_flags & IFF_UP) {
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
- if ((ifp->if_flags ^ pf->if_flags) &
- (IFF_PROMISC | IFF_ALLMULTI)) {
- ixl_set_promisc(vsi);
- }
- } else {
- IXL_PF_UNLOCK(pf);
- ixl_init(pf);
- IXL_PF_LOCK(pf);
- }
- } else {
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_UNLOCK(pf);
- ixl_stop(pf);
- IXL_PF_LOCK(pf);
- }
- }
- pf->if_flags = ifp->if_flags;
- IXL_PF_UNLOCK(pf);
- break;
- case SIOCSDRVSPEC:
- case SIOCGDRVSPEC:
- IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
- "Info)\n");
-
- /* NVM update command */
- if (ifd->ifd_cmd == I40E_NVM_ACCESS)
- error = ixl_handle_nvmupd_cmd(pf, ifd);
- else
- error = EINVAL;
- break;
- case SIOCADDMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_add_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCDELMULTI:
- IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_disable_intr(vsi);
- ixl_del_multi(vsi);
- ixl_enable_intr(vsi);
- IXL_PF_UNLOCK(pf);
- }
- break;
- case SIOCSIFMEDIA:
- case SIOCGIFMEDIA:
-#ifdef IFM_ETH_XTYPE
- case SIOCGIFXMEDIA:
-#endif
- IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
- error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
- break;
- case SIOCSIFCAP:
- {
- int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
- IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
-
- ixl_cap_txcsum_tso(vsi, ifp, mask);
-
- if (mask & IFCAP_RXCSUM)
- ifp->if_capenable ^= IFCAP_RXCSUM;
- if (mask & IFCAP_RXCSUM_IPV6)
- ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
- if (mask & IFCAP_LRO)
- ifp->if_capenable ^= IFCAP_LRO;
- if (mask & IFCAP_VLAN_HWTAGGING)
- ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
- if (mask & IFCAP_VLAN_HWFILTER)
- ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
- if (mask & IFCAP_VLAN_HWTSO)
- ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
- }
- VLAN_CAPABILITIES(ifp);
-
- break;
- }
-
- default:
- IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
- error = ether_ioctl(ifp, command, data);
- break;
- }
-
- return (error);
-}
-
-
-/*********************************************************************
- * Init entry point
- *
- * This routine is used in two ways: by the stack as the init entry
- * point in the network interface structure, and by the driver as a
- * hw/sw initialization routine to get to a consistent state.
- *
- * This routine returns no value; failures are reported via device_printf.
- **********************************************************************/
-
-static void
-ixl_init_locked(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
- struct i40e_filter_control_settings filter;
- u8 tmpaddr[ETHER_ADDR_LEN];
- int ret;
-
- mtx_assert(&pf->pf_mtx, MA_OWNED);
- INIT_DEBUGOUT("ixl_init_locked: begin");
-
- ixl_stop_locked(pf);
-
- /* Get the latest mac address... User might use a LAA */
- bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
- (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
- ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
- bcopy(tmpaddr, hw->mac.addr,
- I40E_ETH_LENGTH_OF_ADDRESS);
- ret = i40e_aq_mac_address_write(hw,
- I40E_AQC_WRITE_TYPE_LAA_ONLY,
- hw->mac.addr, NULL);
- if (ret) {
- device_printf(dev, "LLA address"
- "change failed!!\n");
- return;
- }
- }
-
- ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
-
- /* Set the various hardware offload abilities */
- ifp->if_hwassist = 0;
- if (ifp->if_capenable & IFCAP_TSO)
- ifp->if_hwassist |= CSUM_TSO;
- if (ifp->if_capenable & IFCAP_TXCSUM)
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
- if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
- ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
-
- /* Set up the device filtering */
- bzero(&filter, sizeof(filter));
- filter.enable_ethtype = TRUE;
- filter.enable_macvlan = TRUE;
-#ifdef IXL_FDIR
- filter.enable_fdir = TRUE;
-#endif
- filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
- if (i40e_set_filter_control(hw, &filter))
- device_printf(dev, "i40e_set_filter_control() failed\n");
-
- /* Set up RSS */
- ixl_config_rss(vsi);
-
- /* Prepare the VSI: rings, hmc contexts, etc... */
- if (ixl_initialize_vsi(vsi)) {
- device_printf(dev, "initialize vsi failed!!\n");
- return;
- }
-
- /* Add protocol filters to list */
- ixl_init_filters(vsi);
-
- /* Setup vlan's if needed */
- ixl_setup_vlan_filters(vsi);
-
- /* Set up MSI/X routing and the ITR settings */
- if (ixl_enable_msix) {
- ixl_configure_queue_intr_msix(pf);
- ixl_configure_itr(pf);
- } else
- ixl_configure_legacy(pf);
-
- ixl_enable_rings(vsi);
-
- i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
-
- ixl_reconfigure_filters(vsi);
-
- /* And now turn on interrupts */
- ixl_enable_intr(vsi);
-
- /* Get link info */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- /* Set initial advertised speed sysctl value */
- ixl_get_initial_advertised_speeds(pf);
-
- /* Start the local timer */
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
-
- /* Now inform the stack we're ready */
- ifp->if_drv_flags |= IFF_DRV_RUNNING;
-
- return;
-}
-
-/* For the set_advertise sysctl */
-static void
-ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- enum i40e_status_code status;
- struct i40e_aq_get_phy_abilities_resp abilities;
-
- /* Set initial sysctl values */
-	status = i40e_aq_get_phy_capabilities(hw, FALSE, FALSE, &abilities,
- NULL);
- if (status) {
- /* Non-fatal error */
- device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
- __func__, status);
- return;
- }
-
- if (abilities.link_speed & I40E_LINK_SPEED_40GB)
- pf->advertised_speed |= 0x10;
- if (abilities.link_speed & I40E_LINK_SPEED_20GB)
- pf->advertised_speed |= 0x8;
- if (abilities.link_speed & I40E_LINK_SPEED_10GB)
- pf->advertised_speed |= 0x4;
- if (abilities.link_speed & I40E_LINK_SPEED_1GB)
- pf->advertised_speed |= 0x2;
- if (abilities.link_speed & I40E_LINK_SPEED_100MB)
- pf->advertised_speed |= 0x1;
-}
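For reference, hypothetical names for the advertised_speed bits assigned above (the values mirror the routine exactly; these macros do not exist in the driver):

    /* Hypothetical bit names; values match the assignments above. */
    #define IXL_ADV_SPEED_100MB	0x01
    #define IXL_ADV_SPEED_1GB	0x02
    #define IXL_ADV_SPEED_10GB	0x04
    #define IXL_ADV_SPEED_20GB	0x08
    #define IXL_ADV_SPEED_40GB	0x10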
-
-static int
-ixl_teardown_hw_structs(struct ixl_pf *pf)
-{
- enum i40e_status_code status = 0;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
-
- /* Shutdown LAN HMC */
- if (hw->hmc.hmc_obj) {
- status = i40e_shutdown_lan_hmc(hw);
- if (status) {
- device_printf(dev,
- "init: LAN HMC shutdown failure; status %d\n", status);
- goto err_out;
- }
- }
-
-	// XXX: This gets called when we know the adminq is inactive,
-	// so we already know it's set up when we get here.
-
- /* Shutdown admin queue */
- status = i40e_shutdown_adminq(hw);
- if (status)
- device_printf(dev,
- "init: Admin Queue shutdown failure; status %d\n", status);
-
-err_out:
- return (status);
-}
-
-static int
-ixl_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- u8 set_fc_err_mask;
- int error = 0;
-
- // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
- i40e_clear_hw(hw);
- error = i40e_pf_reset(hw);
- if (error) {
- device_printf(dev, "init: PF reset failure");
- error = EIO;
- goto err_out;
- }
-
- error = i40e_init_adminq(hw);
- if (error) {
- device_printf(dev, "init: Admin queue init failure;"
- " status code %d", error);
- error = EIO;
- goto err_out;
- }
-
- i40e_clear_pxe_mode(hw);
-
- error = ixl_get_hw_capabilities(pf);
- if (error) {
- device_printf(dev, "init: Error retrieving HW capabilities;"
- " status code %d\n", error);
- goto err_out;
- }
-
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init: LAN HMC init failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "init: LAN HMC config failed; status code %d\n",
- error);
- error = EIO;
- goto err_out;
- }
-
- // XXX: possible fix for panic, but our failure recovery is still broken
- error = ixl_switch_config(pf);
- if (error) {
- device_printf(dev, "init: ixl_switch_config() failed: %d\n",
- error);
- goto err_out;
- }
-
- error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
- NULL);
- if (error) {
- device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
- " aq_err %d\n", error, hw->aq.asq_last_status);
- error = EIO;
- goto err_out;
- }
-
- error = i40e_set_fc(hw, &set_fc_err_mask, true);
- if (error) {
- device_printf(dev, "init: setting link flow control failed; retcode %d,"
- " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
- goto err_out;
- }
-
- // XXX: (Rebuild VSIs?)
-
- /* Firmware delay workaround */
- if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
- (hw->aq.fw_maj_ver < 4)) {
- i40e_msec_delay(75);
- error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
- if (error) {
- device_printf(dev, "init: link restart failed, aq_err %d\n",
- hw->aq.asq_last_status);
- goto err_out;
- }
- }
-
-
-err_out:
- return (error);
-}
-
-static void
-ixl_init(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- int error = 0;
-
- /*
- * If the aq is dead here, it probably means something outside of the driver
- * did something to the adapter, like a PF reset.
- * So rebuild the driver's state here if that occurs.
- */
- if (!i40e_check_asq_alive(&pf->hw)) {
- device_printf(dev, "Admin Queue is down; resetting...\n");
- IXL_PF_LOCK(pf);
- ixl_teardown_hw_structs(pf);
- ixl_reset(pf);
- IXL_PF_UNLOCK(pf);
- }
-
- /*
- * Set up LAN queue interrupts here.
- * Kernel interrupt setup functions cannot be called while holding a lock,
- * so this is done outside of init_locked().
- */
- if (pf->msix > 1) {
- /* Teardown existing interrupts, if they exist */
- ixl_teardown_queue_msix(vsi);
- ixl_free_queue_tqs(vsi);
- /* Then set them up again */
- error = ixl_setup_queue_msix(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
- error);
- error = ixl_setup_queue_tqs(vsi);
- if (error)
- device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
- error);
- } else
- // possibly broken
- error = ixl_assign_vsi_legacy(pf);
- if (error) {
- device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
- return;
- }
-
- IXL_PF_LOCK(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** MSIX Interrupt Handlers and Tasklets
-*/
-static void
-ixl_handle_que(void *context, int pending)
-{
- struct ixl_queue *que = context;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- struct ifnet *ifp = vsi->ifp;
- bool more;
-
- if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
- more = ixl_rxeof(que, IXL_RX_LIMIT);
- IXL_TX_LOCK(txr);
- ixl_txeof(que);
- if (!drbr_empty(ifp, txr->br))
- ixl_mq_start_locked(ifp, txr);
- IXL_TX_UNLOCK(txr);
- if (more) {
- taskqueue_enqueue(que->tq, &que->task);
- return;
- }
- }
-
-	/* Re-enable this interrupt */
- ixl_enable_queue(hw, que->me);
- return;
-}
-
-
-/*********************************************************************
- *
- * Legacy Interrupt Service routine
- *
- **********************************************************************/
-void
-ixl_intr(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct ifnet *ifp = vsi->ifp;
- struct tx_ring *txr = &que->txr;
- u32 reg, icr0, mask;
- bool more_tx, more_rx;
-
- ++que->irqs;
-
- /* Protect against spurious interrupts */
- if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
- return;
-
- icr0 = rd32(hw, I40E_PFINT_ICR0);
-
- reg = rd32(hw, I40E_PFINT_DYN_CTL0);
- reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
-
-#ifdef PCI_IOV
- if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
-#endif
-
- if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
- taskqueue_enqueue(pf->tq, &pf->adminq);
- return;
- }
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
- /* re-enable other interrupt causes */
- wr32(hw, I40E_PFINT_ICR0_ENA, mask);
-
- /* And now the queues */
- reg = rd32(hw, I40E_QINT_RQCTL(0));
- reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = rd32(hw, I40E_QINT_TQCTL(0));
- reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
- reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-
- ixl_enable_legacy(hw);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX VSI Interrupt Service routine
- *
- **********************************************************************/
-void
-ixl_msix_que(void *arg)
-{
- struct ixl_queue *que = arg;
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- bool more_tx, more_rx;
-
- /* Protect against spurious interrupts */
- if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
- return;
-
- ++que->irqs;
-
- more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
-
- IXL_TX_LOCK(txr);
- more_tx = ixl_txeof(que);
- /*
- ** Make certain that if the stack
- ** has anything queued the task gets
- ** scheduled to handle it.
- */
- if (!drbr_empty(vsi->ifp, txr->br))
- more_tx = 1;
- IXL_TX_UNLOCK(txr);
-
- ixl_set_queue_rx_itr(que);
- ixl_set_queue_tx_itr(que);
-
- if (more_tx || more_rx)
- taskqueue_enqueue(que->tq, &que->task);
- else
- ixl_enable_queue(hw, que->me);
-
- return;
-}
-
-
-/*********************************************************************
- *
- * MSIX Admin Queue Interrupt Service routine
- *
- **********************************************************************/
-static void
-ixl_msix_adminq(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- u32 reg, mask, rstat_reg;
- bool do_task = FALSE;
-
- ++pf->admin_irq;
-
- reg = rd32(hw, I40E_PFINT_ICR0);
- mask = rd32(hw, I40E_PFINT_ICR0_ENA);
-
- /* Check on the cause */
- if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
- mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
- ixl_handle_mdd_event(pf);
- mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
- }
-
- if (reg & I40E_PFINT_ICR0_GRST_MASK) {
- device_printf(pf->dev, "Reset Requested!\n");
- rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
- rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
- >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
- device_printf(pf->dev, "Reset type: ");
- switch (rstat_reg) {
- /* These others might be handled similarly to an EMPR reset */
- case I40E_RESET_CORER:
- printf("CORER\n");
- break;
- case I40E_RESET_GLOBR:
- printf("GLOBR\n");
- break;
- case I40E_RESET_EMPR:
- printf("EMPR\n");
- atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
- break;
- default:
- printf("?\n");
- break;
- }
- // overload admin queue task to check reset progress?
- do_task = TRUE;
- }
-
- if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
- device_printf(pf->dev, "ECC Error detected!\n");
- }
-
- if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
- device_printf(pf->dev, "HMC Error detected!\n");
- }
-
- if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
- device_printf(pf->dev, "PCI Exception detected!\n");
- }
-
-#ifdef PCI_IOV
- if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
- mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
- taskqueue_enqueue(pf->tq, &pf->vflr_task);
- }
-#endif
-
- reg = rd32(hw, I40E_PFINT_DYN_CTL0);
- reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-
- if (do_task)
- taskqueue_enqueue(pf->tq, &pf->adminq);
-}
-
-/*********************************************************************
- *
- * Media Ioctl callback
- *
- * This routine is called whenever the user queries the status of
- * the interface using ifconfig.
- *
- **********************************************************************/
-static void
-ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
-
- INIT_DEBUGOUT("ixl_media_status: begin");
- IXL_PF_LOCK(pf);
-
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
- ixl_update_link_status(pf);
-
- ifmr->ifm_status = IFM_AVALID;
- ifmr->ifm_active = IFM_ETHER;
-
- if (!pf->link_up) {
- IXL_PF_UNLOCK(pf);
- return;
- }
-
- ifmr->ifm_status |= IFM_ACTIVE;
-
- /* Hardware always does full-duplex */
- ifmr->ifm_active |= IFM_FDX;
-
- switch (hw->phy.link_info.phy_type) {
- /* 100 M */
- case I40E_PHY_TYPE_100BASE_TX:
- ifmr->ifm_active |= IFM_100_TX;
- break;
- /* 1 G */
- case I40E_PHY_TYPE_1000BASE_T:
- ifmr->ifm_active |= IFM_1000_T;
- break;
- case I40E_PHY_TYPE_1000BASE_SX:
- ifmr->ifm_active |= IFM_1000_SX;
- break;
- case I40E_PHY_TYPE_1000BASE_LX:
- ifmr->ifm_active |= IFM_1000_LX;
- break;
- case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 10 G */
- case I40E_PHY_TYPE_10GBASE_SFPP_CU:
- ifmr->ifm_active |= IFM_10G_TWINAX;
- break;
- case I40E_PHY_TYPE_10GBASE_SR:
- ifmr->ifm_active |= IFM_10G_SR;
- break;
- case I40E_PHY_TYPE_10GBASE_LR:
- ifmr->ifm_active |= IFM_10G_LR;
- break;
- case I40E_PHY_TYPE_10GBASE_T:
- ifmr->ifm_active |= IFM_10G_T;
- break;
- case I40E_PHY_TYPE_XAUI:
- case I40E_PHY_TYPE_XFI:
- case I40E_PHY_TYPE_10GBASE_AOC:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- /* 40 G */
- case I40E_PHY_TYPE_40GBASE_CR4:
- case I40E_PHY_TYPE_40GBASE_CR4_CU:
- ifmr->ifm_active |= IFM_40G_CR4;
- break;
- case I40E_PHY_TYPE_40GBASE_SR4:
- ifmr->ifm_active |= IFM_40G_SR4;
- break;
- case I40E_PHY_TYPE_40GBASE_LR4:
- ifmr->ifm_active |= IFM_40G_LR4;
- break;
- case I40E_PHY_TYPE_XLAUI:
- ifmr->ifm_active |= IFM_OTHER;
- break;
-#ifndef IFM_ETH_XTYPE
- case I40E_PHY_TYPE_1000BASE_KX:
- ifmr->ifm_active |= IFM_1000_CX;
- break;
- case I40E_PHY_TYPE_SGMII:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
- ifmr->ifm_active |= IFM_10G_TWINAX;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ifmr->ifm_active |= IFM_10G_CX4;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ifmr->ifm_active |= IFM_10G_SR;
- break;
- case I40E_PHY_TYPE_SFI:
- ifmr->ifm_active |= IFM_OTHER;
- break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_40GBASE_AOC:
- ifmr->ifm_active |= IFM_40G_SR4;
- break;
-#else
- case I40E_PHY_TYPE_1000BASE_KX:
- ifmr->ifm_active |= IFM_1000_KX;
- break;
- case I40E_PHY_TYPE_SGMII:
- ifmr->ifm_active |= IFM_1000_SGMII;
- break;
- /* ERJ: What's the difference between these? */
- case I40E_PHY_TYPE_10GBASE_CR1_CU:
- case I40E_PHY_TYPE_10GBASE_CR1:
- ifmr->ifm_active |= IFM_10G_CR1;
- break;
- case I40E_PHY_TYPE_10GBASE_KX4:
- ifmr->ifm_active |= IFM_10G_KX4;
- break;
- case I40E_PHY_TYPE_10GBASE_KR:
- ifmr->ifm_active |= IFM_10G_KR;
- break;
- case I40E_PHY_TYPE_SFI:
- ifmr->ifm_active |= IFM_10G_SFI;
- break;
- /* Our single 20G media type */
- case I40E_PHY_TYPE_20GBASE_KR2:
- ifmr->ifm_active |= IFM_20G_KR2;
- break;
- case I40E_PHY_TYPE_40GBASE_KR4:
- ifmr->ifm_active |= IFM_40G_KR4;
- break;
- case I40E_PHY_TYPE_XLPPI:
- case I40E_PHY_TYPE_40GBASE_AOC:
- ifmr->ifm_active |= IFM_40G_XLPPI;
- break;
-#endif
- /* Unknown to driver */
- default:
- ifmr->ifm_active |= IFM_UNKNOWN;
- break;
- }
- /* Report flow control status as well */
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
- ifmr->ifm_active |= IFM_ETH_TXPAUSE;
- if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
- ifmr->ifm_active |= IFM_ETH_RXPAUSE;
-
- IXL_PF_UNLOCK(pf);
-
- return;
-}
-
-/*
- * NOTE: Fortville does not support forcing media speeds. Instead,
- * use the set_advertise sysctl to set the speeds Fortville
- * will advertise or be allowed to operate at.
- */
-static int
-ixl_media_change(struct ifnet * ifp)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct ifmedia *ifm = &vsi->media;
-
- INIT_DEBUGOUT("ixl_media_change: begin");
-
- if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
- return (EINVAL);
-
- if_printf(ifp, "Media change is not supported.\n");
-
- return (ENODEV);
-}
-
-
-#ifdef IXL_FDIR
-/*
-** ATR: Application Targeted Receive - creates a filter
-** based on TX flow info that will keep the receive
-** portion of the flow on the same queue. In this
-** implementation it is only available for TCP connections.
-*/
-void
-ixl_atr(struct ixl_queue *que, struct tcphdr *th, int etype)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct tx_ring *txr = &que->txr;
- struct i40e_filter_program_desc *FDIR;
- u32 ptype, dtype;
- int idx;
-
- /* check if ATR is enabled and sample rate */
- if ((!ixl_enable_fdir) || (!txr->atr_rate))
- return;
- /*
- ** We sample all TCP SYN/FIN packets,
- ** or at the selected sample rate
- */
- txr->atr_count++;
- if (((th->th_flags & (TH_FIN | TH_SYN)) == 0) &&
- (txr->atr_count < txr->atr_rate))
- return;
- txr->atr_count = 0;
-
- /* Get a descriptor to use */
- idx = txr->next_avail;
- FDIR = (struct i40e_filter_program_desc *) &txr->base[idx];
- if (++idx == que->num_desc)
- idx = 0;
- txr->avail--;
- txr->next_avail = idx;
-
- ptype = (que->me << I40E_TXD_FLTR_QW0_QINDEX_SHIFT) &
- I40E_TXD_FLTR_QW0_QINDEX_MASK;
-
- ptype |= (etype == ETHERTYPE_IP) ?
- (I40E_FILTER_PCTYPE_NONF_IPV4_TCP <<
- I40E_TXD_FLTR_QW0_PCTYPE_SHIFT) :
- (I40E_FILTER_PCTYPE_NONF_IPV6_TCP <<
- I40E_TXD_FLTR_QW0_PCTYPE_SHIFT);
-
- ptype |= vsi->id << I40E_TXD_FLTR_QW0_DEST_VSI_SHIFT;
-
- dtype = I40E_TX_DESC_DTYPE_FILTER_PROG;
-
- /*
- ** We use the TCP TH_FIN as a trigger to remove
-	** the filter; otherwise it's an update.
- */
- dtype |= (th->th_flags & TH_FIN) ?
- (I40E_FILTER_PROGRAM_DESC_PCMD_REMOVE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT) :
- (I40E_FILTER_PROGRAM_DESC_PCMD_ADD_UPDATE <<
- I40E_TXD_FLTR_QW1_PCMD_SHIFT);
-
- dtype |= I40E_FILTER_PROGRAM_DESC_DEST_DIRECT_PACKET_QINDEX <<
- I40E_TXD_FLTR_QW1_DEST_SHIFT;
-
- dtype |= I40E_FILTER_PROGRAM_DESC_FD_STATUS_FD_ID <<
- I40E_TXD_FLTR_QW1_FD_STATUS_SHIFT;
-
- FDIR->qindex_flex_ptype_vsi = htole32(ptype);
- FDIR->dtype_cmd_cntindex = htole32(dtype);
- return;
-}
-#endif
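The sampling rule in ixl_atr() reduces to one predicate; a sketch under the same tx_ring fields (helper name hypothetical):

    /* Hypothetical helper: sample every SYN/FIN segment, plus every
     * atr_rate-th packet of an established flow. */
    static inline bool
    ixl_atr_should_sample(struct tx_ring *txr, u8 th_flags)
    {
    	if (++txr->atr_count < txr->atr_rate &&
    	    (th_flags & (TH_SYN | TH_FIN)) == 0)
    		return (FALSE);
    	txr->atr_count = 0;
    	return (TRUE);
    }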
-
-
-static void
-ixl_set_promisc(struct ixl_vsi *vsi)
-{
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int err, mcnt = 0;
- bool uni = FALSE, multi = FALSE;
-
- if (ifp->if_flags & IFF_ALLMULTI)
- multi = TRUE;
- else { /* Need to count the multicast addresses */
- struct ifmultiaddr *ifma;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- if (mcnt == MAX_MULTICAST_ADDR)
- break;
- mcnt++;
- }
- if_maddr_runlock(ifp);
- }
-
- if (mcnt >= MAX_MULTICAST_ADDR)
- multi = TRUE;
- if (ifp->if_flags & IFF_PROMISC)
- uni = TRUE;
-
- err = i40e_aq_set_vsi_unicast_promiscuous(hw,
- vsi->seid, uni, NULL);
- err = i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, multi, NULL);
- return;
-}
-
-/*********************************************************************
- * Filter Routines
- *
- * Routines for multicast and vlan filter management.
- *
- *********************************************************************/
-static void
-ixl_add_multi(struct ixl_vsi *vsi)
-{
- struct ifmultiaddr *ifma;
- struct ifnet *ifp = vsi->ifp;
- struct i40e_hw *hw = vsi->hw;
- int mcnt = 0, flags;
-
- IOCTL_DEBUGOUT("ixl_add_multi: begin");
-
- if_maddr_rlock(ifp);
- /*
- ** First just get a count, to decide if we
-	** should simply use multicast promiscuous.
- */
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- mcnt++;
- }
- if_maddr_runlock(ifp);
-
- if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
- /* delete existing MC filters */
- ixl_del_hw_filters(vsi, mcnt);
- i40e_aq_set_vsi_multicast_promiscuous(hw,
- vsi->seid, TRUE, NULL);
- return;
- }
-
- mcnt = 0;
- if_maddr_rlock(ifp);
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- ixl_add_mc_filter(vsi,
- (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
- mcnt++;
- }
- if_maddr_runlock(ifp);
- if (mcnt > 0) {
- flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
- ixl_add_hw_filters(vsi, flags, mcnt);
- }
-
- IOCTL_DEBUGOUT("ixl_add_multi: end");
- return;
-}
-
-static void
-ixl_del_multi(struct ixl_vsi *vsi)
-{
- struct ifnet *ifp = vsi->ifp;
- struct ifmultiaddr *ifma;
- struct ixl_mac_filter *f;
- int mcnt = 0;
- bool match = FALSE;
-
- IOCTL_DEBUGOUT("ixl_del_multi: begin");
-
- /* Search for removed multicast addresses */
- if_maddr_rlock(ifp);
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
- match = FALSE;
- TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
- if (ifma->ifma_addr->sa_family != AF_LINK)
- continue;
- u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
- if (cmp_etheraddr(f->macaddr, mc_addr)) {
- match = TRUE;
- break;
- }
- }
- if (match == FALSE) {
- f->flags |= IXL_FILTER_DEL;
- mcnt++;
- }
- }
- }
- if_maddr_runlock(ifp);
-
- if (mcnt > 0)
- ixl_del_hw_filters(vsi, mcnt);
-}
-
-
-/*********************************************************************
- * Timer routine
- *
- * This routine checks for link status, updates statistics,
- * and runs the watchdog check.
- *
- * Only runs when the driver is configured UP and RUNNING.
- *
- **********************************************************************/
-
-static void
-ixl_local_timer(void *arg)
-{
- struct ixl_pf *pf = arg;
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- device_t dev = pf->dev;
- int hung = 0;
- u32 mask;
-
- mtx_assert(&pf->pf_mtx, MA_OWNED);
-
- /* Fire off the adminq task */
- taskqueue_enqueue(pf->tq, &pf->adminq);
-
- /* Update stats */
- ixl_update_stats_counters(pf);
-
- /*
- ** Check status of the queues
- */
- mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- /* Any queues with outstanding work get a sw irq */
- if (que->busy)
- wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
- /*
-		** Each time txeof runs without cleaning while there
-		** are uncleaned descriptors, it increments busy. If
-		** busy reaches IXL_QUEUE_HUNG we declare the queue hung.
- */
- if (que->busy == IXL_QUEUE_HUNG) {
- ++hung;
- /* Mark the queue as inactive */
- vsi->active_queues &= ~((u64)1 << que->me);
- continue;
- } else {
- /* Check if we've come back from hung */
- if ((vsi->active_queues & ((u64)1 << que->me)) == 0)
- vsi->active_queues |= ((u64)1 << que->me);
- }
- if (que->busy >= IXL_MAX_TX_BUSY) {
-#ifdef IXL_DEBUG
- device_printf(dev,"Warning queue %d "
- "appears to be hung!\n", i);
-#endif
- que->busy = IXL_QUEUE_HUNG;
- ++hung;
- }
- }
- /* Only reinit if all queues show hung */
- if (hung == vsi->num_queues)
- goto hung;
-
- callout_reset(&pf->timer, hz, ixl_local_timer, pf);
- return;
-
-hung:
- device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
- ixl_init_locked(pf);
-}
-
-/*
-** Note: this routine updates the OS on the link state
-** the real check of the hardware only happens with
-** a link interrupt.
-*/
-static void
-ixl_update_link_status(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- struct i40e_hw *hw = &pf->hw;
- struct ifnet *ifp = vsi->ifp;
- device_t dev = pf->dev;
-
- if (pf->link_up) {
- if (vsi->link_active == FALSE) {
- pf->fc = hw->fc.current_mode;
- if (bootverbose) {
- device_printf(dev,"Link is up %d Gbps %s,"
- " Flow Control: %s\n",
- ((pf->link_speed ==
- I40E_LINK_SPEED_40GB)? 40:10),
- "Full Duplex", ixl_fc_string[pf->fc]);
- }
- vsi->link_active = TRUE;
- /*
- ** Warn user if link speed on NPAR enabled
- ** partition is not at least 10GB
- */
- if (hw->func_caps.npar_enable &&
- (hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_1GB ||
- hw->phy.link_info.link_speed ==
- I40E_LINK_SPEED_100MB))
- device_printf(dev, "The partition detected"
- "link speed that is less than 10Gbps\n");
- if_link_state_change(ifp, LINK_STATE_UP);
- }
- } else { /* Link down */
- if (vsi->link_active == TRUE) {
- if (bootverbose)
- device_printf(dev, "Link is Down\n");
- if_link_state_change(ifp, LINK_STATE_DOWN);
- vsi->link_active = FALSE;
- }
- }
-
- return;
-}
-
-static void
-ixl_stop(struct ixl_pf *pf)
-{
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- IXL_PF_UNLOCK(pf);
-
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_free_queue_tqs(&pf->vsi);
-}
-
-/*********************************************************************
- *
- * This routine disables all traffic on the adapter by issuing a
- * global reset on the MAC and deallocates TX/RX buffers.
- *
- **********************************************************************/
-
-static void
-ixl_stop_locked(struct ixl_pf *pf)
-{
- struct ixl_vsi *vsi = &pf->vsi;
- struct ifnet *ifp = vsi->ifp;
-
- INIT_DEBUGOUT("ixl_stop: begin\n");
-
- IXL_PF_LOCK_ASSERT(pf);
-
- /* Stop the local timer */
- callout_stop(&pf->timer);
-
- ixl_disable_rings_intr(vsi);
- ixl_disable_rings(vsi);
-
- /* Tell the stack that the interface is no longer active */
- ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
-}
-
-
-/*********************************************************************
- *
- *  Setup legacy or MSI interrupt resources and handler for the VSI
- *
- **********************************************************************/
-static int
-ixl_assign_vsi_legacy(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- int error, rid = 0;
-
- if (pf->msix == 1)
- rid = 1;
- pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
- &rid, RF_SHAREABLE | RF_ACTIVE);
- if (pf->res == NULL) {
- device_printf(dev, "Unable to allocate"
- " bus resource: vsi legacy/msi interrupt\n");
- return (ENXIO);
- }
-
- /* Set the handler function */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_intr, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "Failed to register legacy/msi handler\n");
- return (error);
- }
- bus_describe_intr(dev, pf->res, pf->tag, "irq0");
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
- taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
- device_get_nameunit(dev));
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-
- pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
- device_get_nameunit(dev));
-
- return (0);
-}
-
-static int
-ixl_setup_adminq_tq(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int error = 0;
-
- /* Tasklet for Admin Queue interrupts */
- TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
-#ifdef PCI_IOV
- /* VFLR Tasklet */
- TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
-#endif
- /* Create and start Admin Queue taskqueue */
- pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
- taskqueue_thread_enqueue, &pf->tq);
- if (!pf->tq) {
- device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
- return (ENOMEM);
- }
- error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
- device_get_nameunit(dev));
- if (error) {
- device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
- error);
- taskqueue_free(pf->tq);
- return (error);
- }
- return (0);
-}
-
-static int
-ixl_setup_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
-#ifdef RSS
- cpuset_t cpu_mask;
- int cpu_id;
-#endif
-
- /* Create queue tasks and start queue taskqueues */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
- TASK_INIT(&que->task, 0, ixl_handle_que, que);
- que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
- taskqueue_thread_enqueue, &que->tq);
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
- CPU_SETOF(cpu_id, &cpu_mask);
- taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
- &cpu_mask, "%s (bucket %d)",
- device_get_nameunit(dev), cpu_id);
-#else
- taskqueue_start_threads(&que->tq, 1, PI_NET,
- "%s (que %d)", device_get_nameunit(dev), que->me);
-#endif
- }
-
- return (0);
-}
-
-static void
-ixl_free_adminq_tq(struct ixl_pf *pf)
-{
- if (pf->tq) {
- taskqueue_free(pf->tq);
- pf->tq = NULL;
- }
-}
-
-static void
-ixl_free_queue_tqs(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- if (que->tq) {
- taskqueue_free(que->tq);
- que->tq = NULL;
- }
- }
-}
-
-static int
-ixl_setup_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, error = 0;
-
- /* Admin IRQ rid is 1, vector is 0 */
- rid = 1;
- /* Get interrupt resource from bus */
- pf->res = bus_alloc_resource_any(dev,
- SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
- if (!pf->res) {
- device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
- " interrupt failed [rid=%d]\n", rid);
- return (ENXIO);
- }
- /* Then associate interrupt with handler */
- error = bus_setup_intr(dev, pf->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_adminq, pf, &pf->tag);
- if (error) {
- pf->res = NULL;
- device_printf(dev, "bus_setup_intr() for Admin Queue"
- " interrupt handler failed, error %d\n", error);
- return (ENXIO);
- }
- error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
- if (error) {
- /* Probably non-fatal? */
- device_printf(dev, "bus_describe_intr() for Admin Queue"
- " interrupt name failed, error %d\n", error);
- }
- pf->admvec = 0;
-
- return (0);
-}
-
-/*
- * Allocate interrupt resources from bus and associate an interrupt handler
- * to those for the VSI's queues.
- */
-static int
-ixl_setup_queue_msix(struct ixl_vsi *vsi)
-{
- device_t dev = vsi->dev;
- struct ixl_queue *que = vsi->queues;
- struct tx_ring *txr;
- int error, rid, vector = 1;
-
- /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
- for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
- int cpu_id = i;
- rid = vector + 1;
- txr = &que->txr;
- que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
- RF_SHAREABLE | RF_ACTIVE);
- if (!que->res) {
- device_printf(dev, "bus_alloc_resource_any() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- return (ENXIO);
- }
- /* Set the handler function */
- error = bus_setup_intr(dev, que->res,
- INTR_TYPE_NET | INTR_MPSAFE, NULL,
- ixl_msix_que, que, &que->tag);
- if (error) {
- device_printf(dev, "bus_setup_intr() for Queue %d"
- " interrupt handler failed, error %d\n",
- que->me, error);
- // TODO: Check for error from this?
- bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- return (error);
- }
- error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
- if (error) {
- device_printf(dev, "bus_describe_intr() for Queue %d"
- " interrupt name failed, error %d\n",
- que->me, error);
- }
- /* Bind the vector to a CPU */
-#ifdef RSS
- cpu_id = rss_getcpu(i % rss_getnumbuckets());
-#endif
- error = bus_bind_intr(dev, que->res, cpu_id);
- if (error) {
- device_printf(dev, "bus_bind_intr() for Queue %d"
- " to CPU %d failed, error %d\n",
- que->me, cpu_id, error);
- }
- que->msix = vector;
- }
-
- return (0);
-}
-
-
-/*
- * Allocate MSI/X vectors
- */
-static int
-ixl_init_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid, want, vectors, queues, available;
-
- /* Override by tuneable */
- if (ixl_enable_msix == 0)
- goto no_msix;
-
- /*
-	** When used in a virtualized environment,
-	** the PCI BUSMASTER capability may not be set,
-	** so explicitly set it here and rewrite
-	** the ENABLE bit in the MSIX control register
-	** at this point to cause the host to
-	** successfully initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
- /* First try MSI/X */
- rid = PCIR_BAR(IXL_BAR);
- pf->msix_mem = bus_alloc_resource_any(dev,
- SYS_RES_MEMORY, &rid, RF_ACTIVE);
- if (!pf->msix_mem) {
- /* May not be enabled */
- device_printf(pf->dev,
- "Unable to map MSIX table\n");
- goto no_msix;
- }
-
- available = pci_msix_count(dev);
- if (available == 0) { /* system has msix disabled */
- bus_release_resource(dev, SYS_RES_MEMORY,
- rid, pf->msix_mem);
- pf->msix_mem = NULL;
- goto no_msix;
- }
-
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
-
- /* Override with tunable value if tunable is less than autoconfig count */
- if ((ixl_max_queues != 0) && (ixl_max_queues <= queues))
- queues = ixl_max_queues;
- else if ((ixl_max_queues != 0) && (ixl_max_queues > queues))
- device_printf(dev, "ixl_max_queues > # of cpus, using "
- "autoconfig amount...\n");
- /* Or limit maximum auto-configured queues to 8 */
- else if ((ixl_max_queues == 0) && (queues > 8))
- queues = 8;
-
-#ifdef RSS
- /* If we're doing RSS, clamp at the number of RSS buckets */
- if (queues > rss_getnumbuckets())
- queues = rss_getnumbuckets();
-#endif
-
- /*
- ** Want one vector (RX/TX pair) per queue
- ** plus an additional for the admin queue.
- */
- want = queues + 1;
- if (want <= available) /* Have enough */
- vectors = want;
- else {
- device_printf(pf->dev,
- "MSIX Configuration Problem, "
- "%d vectors available but %d wanted!\n",
- available, want);
- return (0); /* Will go to Legacy setup */
- }
-
- if (pci_alloc_msix(dev, &vectors) == 0) {
- device_printf(pf->dev,
- "Using MSIX interrupts with %d vectors\n", vectors);
- pf->msix = vectors;
- pf->vsi.num_queues = queues;
-#ifdef RSS
- /*
- * If we're doing RSS, the number of queues needs to
- * match the number of RSS buckets that are configured.
- *
- * + If there's more queues than RSS buckets, we'll end
- * up with queues that get no traffic.
- *
- * + If there's more RSS buckets than queues, we'll end
- * up having multiple RSS buckets map to the same queue,
- * so there'll be some contention.
- */
- if (queues != rss_getnumbuckets()) {
- device_printf(dev,
- "%s: queues (%d) != RSS buckets (%d)"
- "; performance will be impacted.\n",
- __func__, queues, rss_getnumbuckets());
- }
-#endif
- return (vectors);
- }
-no_msix:
- vectors = pci_msi_count(dev);
- pf->vsi.num_queues = 1;
- ixl_max_queues = 1;
- ixl_enable_msix = 0;
- if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
- device_printf(pf->dev, "Using an MSI interrupt\n");
- else {
- vectors = 0;
- device_printf(pf->dev, "Using a Legacy interrupt\n");
- }
- return (vectors);
-}
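To make the vector arithmetic concrete: with 16 CPUs and 9 vectors available, autoconfig picks 8 queues and asks for 9 vectors (one per queue pair plus one for the admin queue). A sketch of the policy as a pure function (name hypothetical; the RSS clamp is omitted):

    static int
    ixl_msix_vectors_wanted(int ncpus, int available, int max_queues)
    {
    	int queues;

    	/* Autoconfig: one queue per CPU, leaving a vector for the AQ */
    	queues = (ncpus > (available - 1)) ? (available - 1) : ncpus;
    	/* A nonzero tunable may only lower the count */
    	if (max_queues != 0 && max_queues <= queues)
    		queues = max_queues;
    	/* Autoconfigured queues are capped at 8 */
    	else if (max_queues == 0 && queues > 8)
    		queues = 8;
    	return (queues + 1);	/* extra vector for the admin queue */
    }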
-
-/*
- * Configure admin queue/misc interrupt cause registers in hardware.
- */
-static void
-ixl_configure_intr0_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- /* First set up the adminq - vector 0 */
- wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
- rd32(hw, I40E_PFINT_ICR0); /* read to clear */
-
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_GRST_MASK |
- I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
- I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
- I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
- I40E_PFINT_ICR0_ENA_VFLR_MASK |
- I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /*
- * 0x7FF is the end of the queue list.
- * This means we won't use MSI-X vector 0 for a queue interrupt
- * in MSIX mode.
- */
- wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
- /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
- wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
-
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
-
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
-}
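The ITR registers count in units of 2 microseconds, so the 0x3E programmed above is 62 * 2 = 124 usecs. A hypothetical conversion helper (not in the driver):

    /* Hypothetical helper: ITR register units are 2 usec each. */
    static inline u32
    ixl_usec_to_itr(u32 usec)
    {
    	return (usec / 2);
    }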
-
-/*
- * Configure queue interrupt cause registers in hardware.
- */
-static void
-ixl_configure_queue_intr_msix(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- u32 reg;
- u16 vector = 1;
-
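-	/*
-	 * Each queue pair shares one MSI-X vector, and the causes for
-	 * a vector form a linked list in hardware: LNKLSTN(i) points
-	 * at RX queue i, the RX cause's NEXTQ field points at TX
-	 * queue i, and the TX cause ends the list with IXL_QUEUE_EOL.
-	 */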
- for (int i = 0; i < vsi->num_queues; i++, vector++) {
- wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
- /* First queue type is RX / 0 */
- wr32(hw, I40E_PFINT_LNKLSTN(i), i);
-
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(i), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
- (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
- (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
- (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
- (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(i), reg);
- }
-}
-
-/*
- * Configure for MSI single vector operation
- */
-static void
-ixl_configure_legacy(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- u32 reg;
-
- wr32(hw, I40E_PFINT_ITR0(0), 0);
- wr32(hw, I40E_PFINT_ITR0(1), 0);
-
- /* Setup "other" causes */
- reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
- | I40E_PFINT_ICR0_ENA_GRST_MASK
- | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
- | I40E_PFINT_ICR0_ENA_GPIO_MASK
- | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
- | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
- | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
- | I40E_PFINT_ICR0_ENA_VFLR_MASK
- | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
- ;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
-
- /* SW_ITR_IDX = 0, but don't change INTENA */
- wr32(hw, I40E_PFINT_DYN_CTL0,
- I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
- I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
- /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
- wr32(hw, I40E_PFINT_STAT_CTL0, 0);
-
- /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
- wr32(hw, I40E_PFINT_LNKLST0, 0);
-
- /* Associate the queue pair to the vector and enable the q int */
- reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
- | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
- | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
- wr32(hw, I40E_QINT_RQCTL(0), reg);
-
- reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
- | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
- | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
- wr32(hw, I40E_QINT_TQCTL(0), reg);
-}
-
-
-/*
- * Get initial ITR values from tunable values.
- */
-static void
-ixl_configure_itr(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
-
- vsi->rx_itr_setting = ixl_rx_itr;
- vsi->tx_itr_setting = ixl_tx_itr;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
- vsi->rx_itr_setting);
- rxr->itr = vsi->rx_itr_setting;
- rxr->latency = IXL_AVE_LATENCY;
-
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
- vsi->tx_itr_setting);
- txr->itr = vsi->tx_itr_setting;
- txr->latency = IXL_AVE_LATENCY;
- }
-}
-
-
-static int
-ixl_allocate_pci_resources(struct ixl_pf *pf)
-{
- int rid;
- device_t dev = pf->dev;
-
- rid = PCIR_BAR(0);
- pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
- &rid, RF_ACTIVE);
-
- if (!(pf->pci_mem)) {
- device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
- return (ENXIO);
- }
-
- pf->osdep.mem_bus_space_tag =
- rman_get_bustag(pf->pci_mem);
- pf->osdep.mem_bus_space_handle =
- rman_get_bushandle(pf->pci_mem);
- pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
- pf->osdep.flush_reg = I40E_GLGEN_STAT;
- pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
-
- pf->hw.back = &pf->osdep;
-
- /*
-	** Now set up MSI or MSI-X; this should return
-	** the number of supported vectors (1 for MSI).
- */
- pf->msix = ixl_init_msix(pf);
- return (0);
-}
-
-/*
- * Teardown and release the admin queue/misc vector
- * interrupt.
- */
-static int
-ixl_teardown_adminq_msix(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int rid;
-
-	if (pf->admvec) /* we are doing MSIX */
-		rid = pf->admvec + 1;
-	else
-		rid = (pf->msix != 0) ? 1 : 0;
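-	/*
-	 * Note: on FreeBSD, legacy INTx is allocated at SYS_RES_IRQ
-	 * rid 0, while MSI/MSI-X vectors start at rid 1.
-	 */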
-
- // TODO: Check for errors from bus_teardown_intr
- // TODO: Check for errors from bus_release_resource
- if (pf->tag != NULL) {
- bus_teardown_intr(dev, pf->res, pf->tag);
- pf->tag = NULL;
- }
- if (pf->res != NULL) {
- bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
- pf->res = NULL;
- }
-
- return (0);
-}
-
-static int
-ixl_teardown_queue_msix(struct ixl_vsi *vsi)
-{
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- int rid, error = 0;
-
- /* We may get here before stations are setup */
- if ((!ixl_enable_msix) || (que == NULL))
- return (0);
-
- /* Release all MSIX queue resources */
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- rid = que->msix + 1;
- if (que->tag != NULL) {
- error = bus_teardown_intr(dev, que->res, que->tag);
- if (error) {
- device_printf(dev, "bus_teardown_intr() for"
- " Queue %d interrupt failed\n",
- que->me);
- // return (ENXIO);
- }
- que->tag = NULL;
- }
- if (que->res != NULL) {
- error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
- if (error) {
- device_printf(dev, "bus_release_resource() for"
- " Queue %d interrupt failed [rid=%d]\n",
- que->me, rid);
- // return (ENXIO);
- }
- que->res = NULL;
- }
- }
-
- return (0);
-}
-
-static void
-ixl_free_pci_resources(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- int memrid;
-
- ixl_teardown_queue_msix(&pf->vsi);
- ixl_teardown_adminq_msix(pf);
-
- if (pf->msix)
- pci_release_msi(dev);
-
- memrid = PCIR_BAR(IXL_BAR);
-
- if (pf->msix_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- memrid, pf->msix_mem);
-
- if (pf->pci_mem != NULL)
- bus_release_resource(dev, SYS_RES_MEMORY,
- PCIR_BAR(0), pf->pci_mem);
-
- return;
-}
-
-static void
-ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
-{
- /* Display supported media types */
- if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_XFI) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
- phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
- phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
-
-#ifndef IFM_ETH_XTYPE
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_CX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1) ||
- phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC) ||
- phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CX4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
-#else
- if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
- || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_SFI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
-
- if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
- if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
-#endif
-}
-
-/*********************************************************************
- *
- *  Set up the networking device structure and register an interface.
- *
- **********************************************************************/
-static int
-ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
-{
- struct ifnet *ifp;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
- struct i40e_aq_get_phy_abilities_resp abilities;
- enum i40e_status_code aq_error = 0;
-
- INIT_DEBUGOUT("ixl_setup_interface: begin");
-
- ifp = vsi->ifp = if_alloc(IFT_ETHER);
- if (ifp == NULL) {
- device_printf(dev, "can not allocate ifnet structure\n");
- return (-1);
- }
- if_initname(ifp, device_get_name(dev), device_get_unit(dev));
- ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = IF_Gbps(40);
- ifp->if_init = ixl_init;
- ifp->if_softc = vsi;
- ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
- ifp->if_ioctl = ixl_ioctl;
-
-#if __FreeBSD_version >= 1100036
- if_setgetcounterfn(ifp, ixl_get_counter);
-#endif
-
- ifp->if_transmit = ixl_mq_start;
-
- ifp->if_qflush = ixl_qflush;
-
- ifp->if_snd.ifq_maxlen = que->num_desc - 2;
-
- vsi->max_frame_size =
- ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
- + ETHER_VLAN_ENCAP_LEN;
-
- /*
- * Tell the upper layer(s) we support long frames.
- */
- ifp->if_hdrlen = sizeof(struct ether_vlan_header);
-
- ifp->if_capabilities |= IFCAP_HWCSUM;
- ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
- ifp->if_capabilities |= IFCAP_TSO;
- ifp->if_capabilities |= IFCAP_JUMBO_MTU;
- ifp->if_capabilities |= IFCAP_LRO;
-
-	/* VLAN capabilities */
- ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
- | IFCAP_VLAN_HWTSO
- | IFCAP_VLAN_MTU
- | IFCAP_VLAN_HWCSUM;
- ifp->if_capenable = ifp->if_capabilities;
-
- /*
-	** Don't turn this on by default: if vlans are
-	** created on another pseudo device (e.g. lagg),
-	** vlan events are not passed through, breaking
-	** operation, but it works with HW FILTER off.
-	** If vlans are used directly on the ixl driver,
-	** enable this to get full hardware tag filtering.
- */
- ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
-
- /*
- * Specify the media types supported by this adapter and register
- * callbacks to update media and link information
- */
- ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
- ixl_media_status);
-
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, TRUE, &abilities, NULL);
- /* May need delay to detect fiber correctly */
- if (aq_error == I40E_ERR_UNKNOWN_PHY) {
- i40e_msec_delay(200);
- aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
- TRUE, &abilities, NULL);
- }
- if (aq_error) {
- if (aq_error == I40E_ERR_UNKNOWN_PHY)
- device_printf(dev, "Unknown PHY type detected!\n");
- else
- device_printf(dev,
- "Error getting supported media types, err %d,"
- " AQ error %d\n", aq_error, hw->aq.asq_last_status);
- return (0);
- }
-
- ixl_add_ifmedia(vsi, abilities.phy_type);
-
- /* Use autoselect media by default */
- ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
- ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
-
- ether_ifattach(ifp, hw->mac.addr);
-
- return (0);
-}
-
-/*
-** Run when the Admin Queue gets a link state change interrupt.
-*/
-static void
-ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct i40e_aqc_get_link_status *status =
- (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
-
- /* Request link status from adapter */
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
- /* Print out message if an unqualified module is found */
- if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
- (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
- (!(status->link_info & I40E_AQ_LINK_UP)))
- device_printf(dev, "Link failed because "
- "an unqualified module was detected!\n");
-
- /* Update OS link info */
- ixl_update_link_status(pf);
-}
-
-/*********************************************************************
- *
- * Get Firmware Switch configuration
- * - this will need to be more robust when more complex
- * switch configurations are enabled.
- *
- **********************************************************************/
-static int
-ixl_switch_config(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = vsi->dev;
- struct i40e_aqc_get_switch_config_resp *sw_config;
- u8 aq_buf[I40E_AQ_LARGE_BUF];
- int ret;
- u16 next = 0;
-
- memset(&aq_buf, 0, sizeof(aq_buf));
- sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
- ret = i40e_aq_get_switch_config(hw, sw_config,
- sizeof(aq_buf), &next, NULL);
- if (ret) {
- device_printf(dev, "aq_get_switch_config() failed, error %d,"
- " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
- return (ret);
- }
-#ifdef IXL_DEBUG
- device_printf(dev,
- "Switch config: header reported: %d in structure, %d total\n",
- sw_config->header.num_reported, sw_config->header.num_total);
- for (int i = 0; i < sw_config->header.num_reported; i++) {
- device_printf(dev,
- "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
- sw_config->element[i].element_type,
- sw_config->element[i].seid,
- sw_config->element[i].uplink_seid,
- sw_config->element[i].downlink_seid);
- }
-#endif
- /* Simplified due to a single VSI at the moment */
- vsi->uplink_seid = sw_config->element[0].uplink_seid;
- vsi->downlink_seid = sw_config->element[0].downlink_seid;
- vsi->seid = sw_config->element[0].seid;
- return (ret);
-}
-
-/*********************************************************************
- *
- *  Initialize the VSI: this handles contexts, which means things
- *  like the number of descriptors and buffer size; the rings are
- *  also initialized through this function.
- *
- **********************************************************************/
-static int
-ixl_initialize_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct ixl_queue *que = vsi->queues;
- device_t dev = vsi->dev;
- struct i40e_hw *hw = vsi->hw;
- struct i40e_vsi_context ctxt;
- int err = 0;
-
- memset(&ctxt, 0, sizeof(ctxt));
- ctxt.seid = vsi->seid;
- if (pf->veb_seid != 0)
- ctxt.uplink_seid = pf->veb_seid;
- ctxt.pf_num = hw->pf_id;
- err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
- " aq_error %d\n", err, hw->aq.asq_last_status);
- return (err);
- }
-#ifdef IXL_DEBUG
- device_printf(dev, "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
- "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
- "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
- ctxt.uplink_seid, ctxt.vsi_number,
- ctxt.vsis_allocated, ctxt.vsis_unallocated,
- ctxt.flags, ctxt.pf_num, ctxt.vf_num,
- ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
-#endif
- /*
- ** Set the queue and traffic class bits
- ** - when multiple traffic classes are supported
- ** this will need to be more robust.
- */
- ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
- ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
- /* In contig mode, que_mapping[0] is first queue index used by this VSI */
- ctxt.info.queue_mapping[0] = 0;
- /*
- * This VSI will only use traffic class 0; start traffic class 0's
- * queue allocation at queue 0, and assign it 64 (2^6) queues (though
- * the driver may not use all of them).
- */
- ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
- & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
- ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
- & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
-
- /* Set VLAN receive stripping mode */
- ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
- ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
- if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- else
- ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-
- /* Keep copy of VSI info in VSI for statistic counters */
- memcpy(&vsi->info, &ctxt.info, sizeof(ctxt.info));
-
- /* Reset VSI statistics */
- ixl_vsi_reset_stats(vsi);
- vsi->hw_filters_add = 0;
- vsi->hw_filters_del = 0;
-
- ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
-
- err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
- if (err) {
- device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d, aq_error %d\n",
- err, hw->aq.asq_last_status);
- return (err);
- }
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
- struct i40e_hmc_obj_txq tctx;
- struct i40e_hmc_obj_rxq rctx;
- u32 txctl;
- u16 size;
-
- /* Setup the HMC TX Context */
- size = que->num_desc * sizeof(struct i40e_tx_desc);
- memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
- tctx.new_context = 1;
- tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
- tctx.qlen = que->num_desc;
- tctx.fc_ena = 0;
- tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
- /* Enable HEAD writeback */
- tctx.head_wb_ena = 1;
- tctx.head_wb_addr = txr->dma.pa +
- (que->num_desc * sizeof(struct i40e_tx_desc));
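-		/*
-		 * With head writeback enabled, the hardware DMAs the TX
-		 * queue head index to this address (the extra u32 reserved
-		 * past the ring in ixl_setup_stations), so the clean path
-		 * can read completion state from host memory.
-		 */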
- tctx.rdylist_act = 0;
- err = i40e_clear_lan_tx_queue_context(hw, i);
- if (err) {
- device_printf(dev, "Unable to clear TX context\n");
- break;
- }
- err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
- if (err) {
- device_printf(dev, "Unable to set TX context\n");
- break;
- }
- /* Associate the ring with this PF */
- txctl = I40E_QTX_CTL_PF_QUEUE;
- txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
- I40E_QTX_CTL_PF_INDX_MASK);
- wr32(hw, I40E_QTX_CTL(i), txctl);
- ixl_flush(hw);
-
- /* Do ring (re)init */
- ixl_init_tx_ring(que);
-
- /* Next setup the HMC RX Context */
- if (vsi->max_frame_size <= MCLBYTES)
- rxr->mbuf_sz = MCLBYTES;
- else
- rxr->mbuf_sz = MJUMPAGESIZE;
-
- u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
-
- /* Set up an RX context for the HMC */
- memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
- rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
- /* ignore header split for now */
- rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
- rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
- vsi->max_frame_size : max_rxmax;
- rctx.dtype = 0;
- rctx.dsize = 1; /* do 32byte descriptors */
- rctx.hsplit_0 = 0; /* no HDR split initially */
- rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
- rctx.qlen = que->num_desc;
- rctx.tphrdesc_ena = 1;
- rctx.tphwdesc_ena = 1;
- rctx.tphdata_ena = 0;
- rctx.tphhead_ena = 0;
- rctx.lrxqthresh = 2;
- rctx.crcstrip = 1;
- rctx.l2tsel = 1;
- rctx.showiv = 1;
- rctx.fc_ena = 0;
- rctx.prefena = 1;
-
- err = i40e_clear_lan_rx_queue_context(hw, i);
- if (err) {
- device_printf(dev,
- "Unable to clear RX context %d\n", i);
- break;
- }
- err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
- if (err) {
- device_printf(dev, "Unable to set RX context %d\n", i);
- break;
- }
- err = ixl_init_rx_ring(que);
- if (err) {
- device_printf(dev, "Fail in init_rx_ring %d\n", i);
- break;
- }
-#ifdef DEV_NETMAP
- /* preserve queue */
- if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
- struct netmap_adapter *na = NA(vsi->ifp);
- struct netmap_kring *kring = &na->rx_rings[i];
- int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
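-			/*
-			 * Back the tail off by the slots netmap still
-			 * holds (nm_kr_rxspace), so the hardware does not
-			 * overwrite buffers owned by userspace.
-			 */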
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
- } else
-#endif /* DEV_NETMAP */
- wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
- }
- return (err);
-}
-
-
-/*********************************************************************
- *
- * Free all VSI structs.
- *
- **********************************************************************/
-void
-ixl_free_vsi(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct ixl_queue *que = vsi->queues;
-
- /* Free station queues */
- if (!vsi->queues)
- goto free_filters;
-
- for (int i = 0; i < vsi->num_queues; i++, que++) {
- struct tx_ring *txr = &que->txr;
- struct rx_ring *rxr = &que->rxr;
-
- if (!mtx_initialized(&txr->mtx)) /* uninitialized */
- continue;
- IXL_TX_LOCK(txr);
- ixl_free_que_tx(que);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- IXL_TX_UNLOCK(txr);
- IXL_TX_LOCK_DESTROY(txr);
-
- if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
- continue;
- IXL_RX_LOCK(rxr);
- ixl_free_que_rx(que);
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- IXL_RX_UNLOCK(rxr);
- IXL_RX_LOCK_DESTROY(rxr);
-
- }
- free(vsi->queues, M_DEVBUF);
-
-free_filters:
- /* Free VSI filter list */
- ixl_free_mac_filters(vsi);
-}
-
-static void
-ixl_free_mac_filters(struct ixl_vsi *vsi)
-{
- struct ixl_mac_filter *f;
-
- while (!SLIST_EMPTY(&vsi->ftl)) {
- f = SLIST_FIRST(&vsi->ftl);
- SLIST_REMOVE_HEAD(&vsi->ftl, next);
- free(f, M_DEVBUF);
- }
-}
-
-
-/*********************************************************************
- *
- *  Allocate memory for the VSI (virtual station interface), its
- *  associated queues, rings, and the descriptors for each; called
- *  only once at attach.
- *
- **********************************************************************/
-static int
-ixl_setup_stations(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi;
- struct ixl_queue *que;
- struct tx_ring *txr;
- struct rx_ring *rxr;
- int rsize, tsize;
- int error = I40E_SUCCESS;
-
- vsi = &pf->vsi;
- vsi->back = (void *)pf;
- vsi->hw = &pf->hw;
- vsi->id = 0;
- vsi->num_vlans = 0;
- vsi->back = pf;
-
- /* Get memory for the station queues */
- if (!(vsi->queues =
- (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
- vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
- device_printf(dev, "Unable to allocate queue memory\n");
- error = ENOMEM;
- goto early;
- }
-
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- que->num_desc = ixl_ringsz;
- que->me = i;
- que->vsi = vsi;
- /* mark the queue as active */
- vsi->active_queues |= (u64)1 << que->me;
- txr = &que->txr;
- txr->que = que;
- txr->tail = I40E_QTX_TAIL(que->me);
-
- /* Initialize the TX lock */
- snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
- /* Create the TX descriptor ring */
- tsize = roundup2((que->num_desc *
- sizeof(struct i40e_tx_desc)) +
- sizeof(u32), DBA_ALIGN);
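-		/*
-		 * The extra u32 reserves room past the ring for the TX
-		 * head writeback value (see head_wb_addr in
-		 * ixl_initialize_vsi).
-		 */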
- if (i40e_allocate_dma_mem(&pf->hw,
- &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
- device_printf(dev,
- "Unable to allocate TX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- txr->base = (struct i40e_tx_desc *)txr->dma.va;
- bzero((void *)txr->base, tsize);
- /* Now allocate transmit soft structs for the ring */
- if (ixl_allocate_tx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up TX structures\n");
- error = ENOMEM;
- goto fail;
- }
- /* Allocate a buf ring */
- txr->br = buf_ring_alloc(4096, M_DEVBUF,
- M_NOWAIT, &txr->mtx);
- if (txr->br == NULL) {
- device_printf(dev,
- "Critical Failure setting up TX buf ring\n");
- error = ENOMEM;
- goto fail;
- }
-
- /*
- * Next the RX queues...
- */
- rsize = roundup2(que->num_desc *
- sizeof(union i40e_rx_desc), DBA_ALIGN);
- rxr = &que->rxr;
- rxr->que = que;
- rxr->tail = I40E_QRX_TAIL(que->me);
-
- /* Initialize the RX side lock */
- snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
- device_get_nameunit(dev), que->me);
- mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
-
- if (i40e_allocate_dma_mem(&pf->hw,
- &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
- device_printf(dev,
- "Unable to allocate RX Descriptor memory\n");
- error = ENOMEM;
- goto fail;
- }
- rxr->base = (union i40e_rx_desc *)rxr->dma.va;
- bzero((void *)rxr->base, rsize);
-
-		/* Allocate receive soft structs for the ring */
- if (ixl_allocate_rx_data(que)) {
- device_printf(dev,
- "Critical Failure setting up receive structs\n");
- error = ENOMEM;
- goto fail;
- }
- }
-
- return (0);
-
-fail:
- for (int i = 0; i < vsi->num_queues; i++) {
- que = &vsi->queues[i];
- rxr = &que->rxr;
- txr = &que->txr;
- if (rxr->base)
- i40e_free_dma_mem(&pf->hw, &rxr->dma);
- if (txr->base)
- i40e_free_dma_mem(&pf->hw, &txr->dma);
- }
-
-early:
- return (error);
-}
-
-/*
-** Provide an update to the queue RX
-** interrupt moderation value.
-*/
-static void
-ixl_set_queue_rx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct rx_ring *rxr = &que->rxr;
- u16 rx_itr;
- u16 rx_latency = 0;
- int rx_bytes;
-
- /* Idle, do nothing */
- if (rxr->bytes == 0)
- return;
-
- if (ixl_dynamic_rx_itr) {
- rx_bytes = rxr->bytes/rxr->itr;
- rx_itr = rxr->itr;
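-		/*
-		 * bytes/itr is a rough bytes-per-interrupt-interval load
-		 * estimate used to walk the latency ladder below.
-		 */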
-
- /* Adjust latency range */
- switch (rxr->latency) {
- case IXL_LOW_LATENCY:
- if (rx_bytes > 10) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (rx_bytes > 20) {
- rx_latency = IXL_BULK_LATENCY;
- rx_itr = IXL_ITR_8K;
- } else if (rx_bytes <= 10) {
- rx_latency = IXL_LOW_LATENCY;
- rx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (rx_bytes <= 20) {
- rx_latency = IXL_AVE_LATENCY;
- rx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- rxr->latency = rx_latency;
-
- if (rx_itr != rxr->itr) {
- /* do an exponential smoothing */
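-			/*
-			 * The blend is biased toward the previous value;
-			 * e.g. with rxr->itr = 100 and a new target of 40,
-			 * the result is 10*40*100 / (9*40 + 100) = 86.
-			 */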
- rx_itr = (10 * rx_itr * rxr->itr) /
- ((9 * rx_itr) + rxr->itr);
- rxr->itr = rx_itr & IXL_MAX_ITR;
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
- }
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->rx_itr_setting = ixl_rx_itr;
- /* Update the hardware if needed */
- if (rxr->itr != vsi->rx_itr_setting) {
- rxr->itr = vsi->rx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
- que->me), rxr->itr);
- }
- }
- rxr->bytes = 0;
- rxr->packets = 0;
- return;
-}
-
-
-/*
-** Provide an update to the queue TX
-** interrupt moderation value.
-*/
-static void
-ixl_set_queue_tx_itr(struct ixl_queue *que)
-{
- struct ixl_vsi *vsi = que->vsi;
- struct i40e_hw *hw = vsi->hw;
- struct tx_ring *txr = &que->txr;
- u16 tx_itr;
- u16 tx_latency = 0;
- int tx_bytes;
-
-
- /* Idle, do nothing */
- if (txr->bytes == 0)
- return;
-
- if (ixl_dynamic_tx_itr) {
- tx_bytes = txr->bytes/txr->itr;
- tx_itr = txr->itr;
-
- switch (txr->latency) {
- case IXL_LOW_LATENCY:
- if (tx_bytes > 10) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- case IXL_AVE_LATENCY:
- if (tx_bytes > 20) {
- tx_latency = IXL_BULK_LATENCY;
- tx_itr = IXL_ITR_8K;
- } else if (tx_bytes <= 10) {
- tx_latency = IXL_LOW_LATENCY;
- tx_itr = IXL_ITR_100K;
- }
- break;
- case IXL_BULK_LATENCY:
- if (tx_bytes <= 20) {
- tx_latency = IXL_AVE_LATENCY;
- tx_itr = IXL_ITR_20K;
- }
- break;
- }
-
- txr->latency = tx_latency;
-
- if (tx_itr != txr->itr) {
- /* do an exponential smoothing */
- tx_itr = (10 * tx_itr * txr->itr) /
- ((9 * tx_itr) + txr->itr);
- txr->itr = tx_itr & IXL_MAX_ITR;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- que->me), txr->itr);
- }
-
-	} else { /* We may have toggled to non-dynamic */
- if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
- vsi->tx_itr_setting = ixl_tx_itr;
- /* Update the hardware if needed */
- if (txr->itr != vsi->tx_itr_setting) {
- txr->itr = vsi->tx_itr_setting;
- wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
- que->me), txr->itr);
- }
- }
- txr->bytes = 0;
- txr->packets = 0;
- return;
-}
-
-#define QUEUE_NAME_LEN 32
-
-static void
-ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
- struct sysctl_ctx_list *ctx, const char *sysctl_name)
-{
- struct sysctl_oid *tree;
- struct sysctl_oid_list *child;
- struct sysctl_oid_list *vsi_list;
-
- tree = device_get_sysctl_tree(pf->dev);
- child = SYSCTL_CHILDREN(tree);
- vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
- CTLFLAG_RD, NULL, "VSI Number");
- vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
-
- ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
-}
-
-#ifdef IXL_DEBUG
-/**
- * ixl_sysctl_qtx_tail_handler
- * Retrieves I40E_QTX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->txr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-
-/**
- * ixl_sysctl_qrx_tail_handler
- * Retrieves I40E_QRX_TAIL value from hardware
- * for a sysctl.
- */
-static int
-ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_queue *que;
- int error;
- u32 val;
-
- que = ((struct ixl_queue *)oidp->oid_arg1);
- if (!que) return 0;
-
- val = rd32(que->vsi->hw, que->rxr.tail);
- error = sysctl_handle_int(oidp, &val, 0, req);
- if (error || !req->newptr)
- return error;
- return (0);
-}
-#endif
-
-static void
-ixl_add_hw_stats(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *queues = vsi->queues;
- struct i40e_hw_port_stats *pf_stats = &pf->stats;
-
- struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
- struct sysctl_oid *tree = device_get_sysctl_tree(dev);
- struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
- struct sysctl_oid_list *vsi_list;
-
- struct sysctl_oid *queue_node;
- struct sysctl_oid_list *queue_list;
-
- struct tx_ring *txr;
- struct rx_ring *rxr;
- char queue_namebuf[QUEUE_NAME_LEN];
-
- /* Driver statistics */
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
- CTLFLAG_RD, &pf->watchdog_events,
- "Watchdog timeouts");
- SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
- CTLFLAG_RD, &pf->admin_irq,
- "Admin Queue IRQ Handled");
-
- ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
- vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
-
- /* Queue statistics */
- for (int q = 0; q < vsi->num_queues; q++) {
- snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
- queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
- OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
- queue_list = SYSCTL_CHILDREN(queue_node);
-
- txr = &(queues[q].txr);
- rxr = &(queues[q].rxr);
-
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
- CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
- "m_defrag() failed");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
- CTLFLAG_RD, &(queues[q].irqs),
- "irqs on this queue");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
- CTLFLAG_RD, &(queues[q].tso),
- "TSO");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
- CTLFLAG_RD, &(queues[q].tx_dma_setup),
- "Driver tx dma failure in xmit");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
- CTLFLAG_RD, &(txr->no_desc),
- "Queue No Descriptor Available");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
- CTLFLAG_RD, &(txr->total_packets),
- "Queue Packets Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
- CTLFLAG_RD, &(txr->tx_bytes),
- "Queue Bytes Transmitted");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
- CTLFLAG_RD, &(rxr->rx_packets),
- "Queue Packets Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
- CTLFLAG_RD, &(rxr->rx_bytes),
- "Queue Bytes Received");
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
- CTLFLAG_RD, &(rxr->desc_errs),
- "Queue Rx Descriptor Errors");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
- CTLFLAG_RD, &(rxr->itr), 0,
- "Queue Rx ITR Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
- CTLFLAG_RD, &(txr->itr), 0,
- "Queue Tx ITR Interval");
- // Not actual latency; just a calculated value to put in a register
- // TODO: Put in better descriptions here
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_latency",
- CTLFLAG_RD, &(rxr->latency), 0,
- "Queue Rx ITRL Average Interval");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_latency",
- CTLFLAG_RD, &(txr->latency), 0,
- "Queue Tx ITRL Average Interval");
-
-#ifdef IXL_DEBUG
- SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
- CTLFLAG_RD, &(rxr->not_done),
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
- CTLFLAG_RD, &(rxr->next_refresh), 0,
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
- CTLFLAG_RD, &(rxr->next_check), 0,
- "Queue Rx Descriptors not Done");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qtx_tail_handler, "IU",
- "Queue Transmit Descriptor Tail");
- SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
- CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
- sizeof(struct ixl_queue),
- ixl_sysctl_qrx_tail_handler, "IU",
- "Queue Receive Descriptor Tail");
-#endif
- }
-
- /* MAC stats */
- ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
-}
-
-static void
-ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
- struct sysctl_oid_list *child,
- struct i40e_eth_stats *eth_stats)
-{
- struct ixl_sysctl_info ctls[] =
- {
- {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
- {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
- "Unicast Packets Received"},
- {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
- "Multicast Packets Received"},
- {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
- "Broadcast Packets Received"},
- {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
- {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
- {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
- {&eth_stats->tx_multicast, "mcast_pkts_txd",
- "Multicast Packets Transmitted"},
- {&eth_stats->tx_broadcast, "bcast_pkts_txd",
- "Broadcast Packets Transmitted"},
- // end
- {0,0,0}
- };
-
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-}
-
-static void
-ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
- struct sysctl_oid_list *child,
- struct i40e_hw_port_stats *stats)
-{
- struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
- CTLFLAG_RD, NULL, "Mac Statistics");
- struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
-
- struct i40e_eth_stats *eth_stats = &stats->eth;
- ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
-
- struct ixl_sysctl_info ctls[] =
- {
- {&stats->crc_errors, "crc_errors", "CRC Errors"},
- {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
- {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
- {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
- {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
- /* Packet Reception Stats */
- {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
- {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
- {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
- {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
- {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
- {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
- {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
- {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
- {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
- {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
- {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
- {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
- /* Packet Transmission Stats */
- {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
- {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
- {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
- {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
- {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
- {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
- {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
- /* Flow control */
- {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
- {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
- {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
- {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
- /* End */
- {0,0,0}
- };
-
- struct ixl_sysctl_info *entry = ctls;
- while (entry->stat != NULL)
- {
- SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
- CTLFLAG_RD, entry->stat,
- entry->description);
- entry++;
- }
-}
-
-
-/*
-** ixl_config_rss - setup RSS
-** - note this is done for the single vsi
-*/
-static void
-ixl_config_rss(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = vsi->hw;
- u32 lut = 0;
- u64 set_hena = 0, hena;
- int i, j, que_id;
-#ifdef RSS
- u32 rss_hash_config;
- u32 rss_seed[IXL_KEYSZ];
-#else
- u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
-#endif
-
-#ifdef RSS
- /* Fetch the configured RSS key */
- rss_getkey((uint8_t *) &rss_seed);
-#endif
-
- /* Fill out hash function seed */
- for (i = 0; i < IXL_KEYSZ; i++)
- i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
-
- /* Enable PCTYPES for RSS: */
-#ifdef RSS
- rss_hash_config = rss_gethashconfig();
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
- if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
- if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
- if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
- set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
-#else
- set_hena =
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
-#endif
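-	/*
-	 * HENA is a 64-bit enable mask split across two 32-bit
-	 * registers; OR in the new PCTYPEs so existing ones stay set.
-	 */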
- hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
- ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
- hena |= set_hena;
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
- i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
-
-	/* Populate the LUT with the max number of queues in round-robin fashion */
- for (i = j = 0; i < pf->hw.func_caps.rss_table_size; i++, j++) {
- if (j == vsi->num_queues)
- j = 0;
-#ifdef RSS
- /*
- * Fetch the RSS bucket id for the given indirection entry.
- * Cap it at the number of configured buckets (which is
- * num_queues.)
- */
- que_id = rss_get_indirection_to_bucket(i);
- que_id = que_id % vsi->num_queues;
-#else
- que_id = j;
-#endif
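-		/*
-		 * Entries are packed four per 32-bit HLUT register, each
-		 * masked to rss_table_entry_width bits; e.g. with four
-		 * queues the first write below is HLUT(0) = 0x00010203.
-		 */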
- /* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (que_id &
- ((0x1 << pf->hw.func_caps.rss_table_entry_width) - 1));
- /* On i = 3, we have 4 entries in lut; write to the register */
- if ((i & 3) == 3)
- wr32(hw, I40E_PFQF_HLUT(i >> 2), lut);
- }
- ixl_flush(hw);
-}
-
-
-/*
-** This routine is run via a vlan config EVENT;
-** it enables us to use the HW filter table since
-** we can get the vlan id. It just creates the
-** entry in the soft version of the VFTA; init will
-** repopulate the real table.
-*/
-static void
-ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg) /* Not our event */
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- ++vsi->num_vlans;
- ixl_add_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine is run via a vlan
-** unconfig EVENT; it removes our entry
-** from the soft vfta.
-*/
-static void
-ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
-{
- struct ixl_vsi *vsi = ifp->if_softc;
- struct i40e_hw *hw = vsi->hw;
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
-
- if (ifp->if_softc != arg)
- return;
-
- if ((vtag == 0) || (vtag > 4095)) /* Invalid */
- return;
-
- IXL_PF_LOCK(pf);
- --vsi->num_vlans;
- ixl_del_filter(vsi, hw->mac.addr, vtag);
- IXL_PF_UNLOCK(pf);
-}
-
-/*
-** This routine updates vlan filters; called by init,
-** it scans the filter table and then updates the hw
-** after a soft reset.
-*/
-static void
-ixl_setup_vlan_filters(struct ixl_vsi *vsi)
-{
- struct ixl_mac_filter *f;
- int cnt = 0, flags;
-
- if (vsi->num_vlans == 0)
- return;
- /*
- ** Scan the filter list for vlan entries,
- ** mark them for addition and then call
- ** for the AQ update.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags & IXL_FILTER_VLAN) {
- f->flags |=
- (IXL_FILTER_ADD |
- IXL_FILTER_USED);
- cnt++;
- }
- }
- if (cnt == 0) {
- printf("setup vlan: no filters found!\n");
- return;
- }
- flags = IXL_FILTER_VLAN;
- flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- ixl_add_hw_filters(vsi, flags, cnt);
- return;
-}
-
-/*
-** Initialize filter list and add filters that the hardware
-** needs to know about.
-**
-** Requires VSI's filter list & seid to be set before calling.
-*/
-static void
-ixl_init_filters(struct ixl_vsi *vsi)
-{
- /* Add broadcast address */
- ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
-
- /*
- * Prevent Tx flow control frames from being sent out by
- * non-firmware transmitters.
- */
- i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
-}
-
-/*
-** This routine adds multicast filters
-*/
-static void
-ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
-{
- struct ixl_mac_filter *f;
-
-	/* Does one already exist? */
- f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
- if (f != NULL)
- return;
-
- f = ixl_get_filter(vsi);
- if (f == NULL) {
- printf("WARNING: no filter available!!\n");
- return;
- }
- bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
- f->vlan = IXL_VLAN_ANY;
- f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
- | IXL_FILTER_MC);
-
- return;
-}
-
-static void
-ixl_reconfigure_filters(struct ixl_vsi *vsi)
-{
-
- ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
-}
-
-/*
-** This routine adds macvlan filters
-*/
-static void
-ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f, *tmp;
- struct ixl_pf *pf;
- device_t dev;
-
- DEBUGOUT("ixl_add_filter: begin");
-
- pf = vsi->back;
- dev = pf->dev;
-
-	/* Does one already exist? */
- f = ixl_find_filter(vsi, macaddr, vlan);
- if (f != NULL)
- return;
- /*
-	** If this is the first vlan being registered, we
-	** need to remove the ANY filter that indicates we are
-	** not in a vlan, and replace it with a 0 filter.
- */
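-	/*
-	 * For example: the station starts out with a (mac, VLAN_ANY)
-	 * filter. When vlan 100 is first registered, that filter is
-	 * replaced by (mac, 0) so untagged frames still match, and
-	 * this call then goes on to add (mac, 100) below.
-	 */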
- if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
- tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
- if (tmp != NULL) {
- ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
- ixl_add_filter(vsi, macaddr, 0);
- }
- }
-
- f = ixl_get_filter(vsi);
- if (f == NULL) {
- device_printf(dev, "WARNING: no filter available!!\n");
- return;
- }
- bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
- f->vlan = vlan;
- f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
- if (f->vlan != IXL_VLAN_ANY)
- f->flags |= IXL_FILTER_VLAN;
- else
- vsi->num_macs++;
-
- ixl_add_hw_filters(vsi, f->flags, 1);
- return;
-}
-
-static void
-ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f;
-
- f = ixl_find_filter(vsi, macaddr, vlan);
- if (f == NULL)
- return;
-
- f->flags |= IXL_FILTER_DEL;
- ixl_del_hw_filters(vsi, 1);
- vsi->num_macs--;
-
- /* Check if this is the last vlan removal */
- if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
- /* Switch back to a non-vlan filter */
- ixl_del_filter(vsi, macaddr, 0);
- ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
- }
- return;
-}
-
-/*
-** Find the filter with both matching mac addr and vlan id
-*/
-static struct ixl_mac_filter *
-ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
-{
- struct ixl_mac_filter *f;
- bool match = FALSE;
-
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (!cmp_etheraddr(f->macaddr, macaddr))
- continue;
- if (f->vlan == vlan) {
- match = TRUE;
- break;
- }
- }
-
- if (!match)
- f = NULL;
- return (f);
-}
-
-/*
-** This routine takes additions to the vsi filter
-** table and creates an Admin Queue call to create
-** the filters in the hardware.
-*/
-static void
-ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
-{
- struct i40e_aqc_add_macvlan_element_data *a, *b;
- struct ixl_mac_filter *f;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
- int err, j = 0;
-
- pf = vsi->back;
- dev = pf->dev;
- hw = &pf->hw;
- IXL_PF_LOCK_ASSERT(pf);
-
- a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (a == NULL) {
- device_printf(dev, "add_hw_filters failed to get memory\n");
- return;
- }
-
- /*
-	** Scan the filter list; each time we find one
- ** we add it to the admin queue array and turn off
- ** the add bit.
- */
- SLIST_FOREACH(f, &vsi->ftl, next) {
- if (f->flags == flags) {
- b = &a[j]; // a pox on fvl long names :)
- bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
- if (f->vlan == IXL_VLAN_ANY) {
- b->vlan_tag = 0;
- b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
- } else {
- b->vlan_tag = f->vlan;
- b->flags = 0;
- }
- b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
- f->flags &= ~IXL_FILTER_ADD;
- j++;
- }
- if (j == cnt)
- break;
- }
- if (j > 0) {
- err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
- if (err)
- device_printf(dev, "aq_add_macvlan err %d, "
- "aq_error %d\n", err, hw->aq.asq_last_status);
- else
- vsi->hw_filters_add += j;
- }
- free(a, M_DEVBUF);
- return;
-}
-
-/*
-** This routine takes removals in the vsi filter
-** table and creates an Admin Queue call to delete
-** the filters in the hardware.
-*/
-static void
-ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
-{
- struct i40e_aqc_remove_macvlan_element_data *d, *e;
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- device_t dev;
- struct ixl_mac_filter *f, *f_temp;
- int err, j = 0;
-
- DEBUGOUT("ixl_del_hw_filters: begin\n");
-
- pf = vsi->back;
- hw = &pf->hw;
- dev = pf->dev;
-
- d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (d == NULL) {
- printf("del hw filter failed to get memory\n");
- return;
- }
-
- SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
- if (f->flags & IXL_FILTER_DEL) {
- e = &d[j]; // a pox on fvl long names :)
- bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
- e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
- e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
- /* delete entry from vsi list */
- SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
- free(f, M_DEVBUF);
- j++;
- }
- if (j == cnt)
- break;
- }
- if (j > 0) {
- err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
- /* NOTE: returns ENOENT every time but seems to work fine,
- so we'll ignore that specific error. */
- // TODO: Does this still occur on current firmwares?
- if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
- int sc = 0;
- for (int i = 0; i < j; i++)
- sc += (!d[i].error_code);
- vsi->hw_filters_del += sc;
- device_printf(dev,
- "Failed to remove %d/%d filters, aq error %d\n",
- j - sc, j, hw->aq.asq_last_status);
- } else
- vsi->hw_filters_del += j;
- }
- free(d, M_DEVBUF);
-
- DEBUGOUT("ixl_del_hw_filters: end\n");
- return;
-}
-
-static int
-ixl_enable_rings(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int index, error;
- u32 reg;
-
- error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- index = vsi->first_queue + i;
- i40e_pre_tx_queue_cfg(hw, index, TRUE);
-
- reg = rd32(hw, I40E_QTX_ENA(index));
- reg |= I40E_QTX_ENA_QENA_REQ_MASK |
- I40E_QTX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QTX_ENA(index), reg);
- /* Verify the enable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(index));
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
- break;
- i40e_msec_delay(10);
- }
- if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "TX queue %d disabled!\n",
- index);
- error = ETIMEDOUT;
- }
-
- reg = rd32(hw, I40E_QRX_ENA(index));
- reg |= I40E_QRX_ENA_QENA_REQ_MASK |
- I40E_QRX_ENA_QENA_STAT_MASK;
- wr32(hw, I40E_QRX_ENA(index), reg);
- /* Verify the enable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(index));
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
- break;
- i40e_msec_delay(10);
- }
- if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
- device_printf(pf->dev, "RX queue %d disabled!\n",
- index);
- error = ETIMEDOUT;
- }
- }
-
- return (error);
-}
-
-static int
-ixl_disable_rings(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = vsi->back;
- struct i40e_hw *hw = &pf->hw;
- int index, error;
- u32 reg;
-
- error = 0;
- for (int i = 0; i < vsi->num_queues; i++) {
- index = vsi->first_queue + i;
-
- i40e_pre_tx_queue_cfg(hw, index, FALSE);
- i40e_usec_delay(500);
-
- reg = rd32(hw, I40E_QTX_ENA(index));
- reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QTX_ENA(index), reg);
- /* Verify the disable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QTX_ENA(index));
- if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
- break;
- i40e_msec_delay(10);
- }
- if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "TX queue %d still enabled!\n",
- index);
- error = ETIMEDOUT;
- }
-
- reg = rd32(hw, I40E_QRX_ENA(index));
- reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
- wr32(hw, I40E_QRX_ENA(index), reg);
- /* Verify the disable took */
- for (int j = 0; j < 10; j++) {
- reg = rd32(hw, I40E_QRX_ENA(index));
- if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
- break;
- i40e_msec_delay(10);
- }
- if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
- device_printf(pf->dev, "RX queue %d still enabled!\n",
- index);
- error = ETIMEDOUT;
- }
- }
-
- return (error);
-}
-
-/**
- * ixl_handle_mdd_event
- *
- * Called from the interrupt handler to identify possibly malicious
- * VFs (and events from the PF as well).
- **/
-static void
-ixl_handle_mdd_event(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- bool mdd_detected = false;
- bool pf_mdd_detected = false;
- u32 reg;
-
- /* find what triggered the MDD event */
- reg = rd32(hw, I40E_GL_MDET_TX);
- if (reg & I40E_GL_MDET_TX_VALID_MASK) {
- u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
- I40E_GL_MDET_TX_PF_NUM_SHIFT;
- u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
- I40E_GL_MDET_TX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
- I40E_GL_MDET_TX_QUEUE_SHIFT;
- device_printf(dev,
- "Malicious Driver Detection event 0x%02x"
- " on TX queue %d pf number 0x%02x\n",
- event, queue, pf_num);
- wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
- mdd_detected = true;
- }
- reg = rd32(hw, I40E_GL_MDET_RX);
- if (reg & I40E_GL_MDET_RX_VALID_MASK) {
- u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
- I40E_GL_MDET_RX_FUNCTION_SHIFT;
- u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
- I40E_GL_MDET_RX_EVENT_SHIFT;
- u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
- I40E_GL_MDET_RX_QUEUE_SHIFT;
- device_printf(dev,
- "Malicious Driver Detection event 0x%02x"
- " on RX queue %d of function 0x%02x\n",
- event, queue, func);
- wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
- mdd_detected = true;
- }
-
- if (mdd_detected) {
- reg = rd32(hw, I40E_PF_MDET_TX);
- if (reg & I40E_PF_MDET_TX_VALID_MASK) {
- wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
- device_printf(dev,
-				"MDD TX event is for this function 0x%08x\n",
- reg);
- pf_mdd_detected = true;
- }
- reg = rd32(hw, I40E_PF_MDET_RX);
- if (reg & I40E_PF_MDET_RX_VALID_MASK) {
- wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
- device_printf(dev,
-				"MDD RX event is for this function 0x%08x\n",
- reg);
- pf_mdd_detected = true;
- }
- }
-
- /* re-enable mdd interrupt cause */
- reg = rd32(hw, I40E_PFINT_ICR0_ENA);
- reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_enable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- if (ixl_enable_msix) {
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_enable_queue(hw, que->me);
- } else
- ixl_enable_legacy(hw);
-}
-
-static void
-ixl_disable_rings_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
- struct ixl_queue *que = vsi->queues;
-
- for (int i = 0; i < vsi->num_queues; i++, que++)
- ixl_disable_queue(hw, que->me);
-}
-
-static void
-ixl_disable_intr(struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw = vsi->hw;
-
- if (ixl_enable_msix)
- ixl_disable_adminq(hw);
- else
- ixl_disable_legacy(hw);
-}
-
-static void
-ixl_enable_adminq(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_disable_adminq(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
- ixl_flush(hw);
-}
-
-static void
-ixl_enable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
- I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-}
-
-static void
-ixl_disable_queue(struct i40e_hw *hw, int id)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
-}
-
-static void
-ixl_enable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
- reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
- I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
- (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-static void
-ixl_disable_legacy(struct i40e_hw *hw)
-{
- u32 reg;
-
- reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
- wr32(hw, I40E_PFINT_DYN_CTL0, reg);
-}
-
-static void
-ixl_update_stats_counters(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_vf *vf;
-
- struct i40e_hw_port_stats *nsd = &pf->stats;
- struct i40e_hw_port_stats *osd = &pf->stats_offsets;
-
- /* Update hw stats */
- ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
- pf->stat_offsets_loaded,
- &osd->crc_errors, &nsd->crc_errors);
- ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
- pf->stat_offsets_loaded,
- &osd->illegal_bytes, &nsd->illegal_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
- I40E_GLPRT_GORCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
- ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
- I40E_GLPRT_GOTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
- ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_discards,
- &nsd->eth.rx_discards);
- ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
- I40E_GLPRT_UPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_unicast,
- &nsd->eth.rx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
- I40E_GLPRT_UPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_unicast,
- &nsd->eth.tx_unicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
- I40E_GLPRT_MPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_multicast,
- &nsd->eth.rx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
- I40E_GLPRT_MPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_multicast,
- &nsd->eth.tx_multicast);
- ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
- I40E_GLPRT_BPRCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.rx_broadcast,
- &nsd->eth.rx_broadcast);
- ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
- I40E_GLPRT_BPTCL(hw->port),
- pf->stat_offsets_loaded,
- &osd->eth.tx_broadcast,
- &nsd->eth.tx_broadcast);
-
- ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_dropped_link_down,
- &nsd->tx_dropped_link_down);
- ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_local_faults,
- &nsd->mac_local_faults);
- ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->mac_remote_faults,
- &nsd->mac_remote_faults);
- ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_length_errors,
- &nsd->rx_length_errors);
-
- /* Flow control (LFC) stats */
- ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_rx, &nsd->link_xon_rx);
- ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xon_tx, &nsd->link_xon_tx);
- ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_rx, &nsd->link_xoff_rx);
- ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
- pf->stat_offsets_loaded,
- &osd->link_xoff_tx, &nsd->link_xoff_tx);
-
- /* Packet size stats rx */
- ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
- I40E_GLPRT_PRC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_64, &nsd->rx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
- I40E_GLPRT_PRC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_127, &nsd->rx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
- I40E_GLPRT_PRC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_255, &nsd->rx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
- I40E_GLPRT_PRC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_511, &nsd->rx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
- I40E_GLPRT_PRC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1023, &nsd->rx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
- I40E_GLPRT_PRC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_1522, &nsd->rx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
- I40E_GLPRT_PRC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_size_big, &nsd->rx_size_big);
-
- /* Packet size stats tx */
- ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
- I40E_GLPRT_PTC64L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_64, &nsd->tx_size_64);
- ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
- I40E_GLPRT_PTC127L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_127, &nsd->tx_size_127);
- ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
- I40E_GLPRT_PTC255L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_255, &nsd->tx_size_255);
- ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
- I40E_GLPRT_PTC511L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_511, &nsd->tx_size_511);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
- I40E_GLPRT_PTC1023L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1023, &nsd->tx_size_1023);
- ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
- I40E_GLPRT_PTC1522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_1522, &nsd->tx_size_1522);
- ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
- I40E_GLPRT_PTC9522L(hw->port),
- pf->stat_offsets_loaded,
- &osd->tx_size_big, &nsd->tx_size_big);
-
- ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_undersize, &nsd->rx_undersize);
- ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_fragments, &nsd->rx_fragments);
- ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_oversize, &nsd->rx_oversize);
- ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
- pf->stat_offsets_loaded,
- &osd->rx_jabber, &nsd->rx_jabber);
- pf->stat_offsets_loaded = true;
- /* End hw stats */
-
- /* Update vsi stats */
- ixl_update_vsi_stats(vsi);
-
- for (int i = 0; i < pf->num_vfs; i++) {
- vf = &pf->vfs[i];
- if (vf->vf_flags & VF_FLAG_ENABLED)
- ixl_update_eth_stats(&pf->vfs[i].vsi);
- }
-}
-
-static int
-ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- device_t dev = pf->dev;
- bool is_up = false;
- int error = 0;
-
- is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
-
- /* Teardown */
- if (is_up)
- ixl_stop(pf);
- error = i40e_shutdown_lan_hmc(hw);
- if (error)
- device_printf(dev,
- "Shutdown LAN HMC failed with code %d\n", error);
- ixl_disable_adminq(hw);
- ixl_teardown_adminq_msix(pf);
- error = i40e_shutdown_adminq(hw);
- if (error)
- device_printf(dev,
- "Shutdown Admin queue failed with code %d\n", error);
-
- /* Setup */
- error = i40e_init_adminq(hw);
- if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
- device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
- error);
- }
- error = ixl_setup_adminq_msix(pf);
- if (error) {
- device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
- error);
- }
- ixl_configure_intr0_msix(pf);
- ixl_enable_adminq(hw);
- error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
- hw->func_caps.num_rx_qp, 0, 0);
- if (error) {
- device_printf(dev, "init_lan_hmc failed: %d\n", error);
- }
- error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
- if (error) {
- device_printf(dev, "configure_lan_hmc failed: %d\n", error);
- }
- if (is_up)
- ixl_init(pf);
-
- return (0);
-}
-
-static void
-ixl_handle_empr_reset(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int count = 0;
- u32 reg;
-
- /* Typically finishes within 3-4 seconds */
- while (count++ < 100) {
- reg = rd32(hw, I40E_GLGEN_RSTAT)
- & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
- if (reg)
- i40e_msec_delay(100);
- else
- break;
- }
-#ifdef IXL_DEBUG
- // Reset-related
- device_printf(dev, "EMPR reset wait count: %d\n", count);
-#endif
-
- device_printf(dev, "Rebuilding driver state...\n");
- ixl_rebuild_hw_structs_after_reset(pf);
- device_printf(dev, "Rebuilding driver state done.\n");
-
- atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
-}
-
-/*
-** Tasklet handler for MSIX Adminq interrupts
-** - run outside interrupt context since it may sleep
-*/
-static void
-ixl_do_adminq(void *context, int pending)
-{
- struct ixl_pf *pf = context;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_arq_event_info event;
- i40e_status ret;
- device_t dev = pf->dev;
- u32 loop = 0;
- u16 opcode, result;
-
- if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- /* Flag cleared at end of this function */
- ixl_handle_empr_reset(pf);
- return;
- }
-
- /* Admin Queue handling */
- event.buf_len = IXL_AQ_BUF_SZ;
- event.msg_buf = malloc(event.buf_len,
- M_DEVBUF, M_NOWAIT | M_ZERO);
- if (!event.msg_buf) {
- device_printf(dev, "%s: Unable to allocate memory for Admin"
- " Queue event!\n", __func__);
- return;
- }
-
- IXL_PF_LOCK(pf);
- /* clean and process any events */
- do {
- ret = i40e_clean_arq_element(hw, &event, &result);
- if (ret)
- break;
- opcode = LE16_TO_CPU(event.desc.opcode);
-#ifdef IXL_DEBUG
- device_printf(dev, "%s: Admin Queue event: %#06x\n", __func__,
- opcode);
-#endif
- switch (opcode) {
- case i40e_aqc_opc_get_link_status:
- ixl_link_event(pf, &event);
- break;
- case i40e_aqc_opc_send_msg_to_pf:
-#ifdef PCI_IOV
- ixl_handle_vf_msg(pf, &event);
-#endif
- break;
- case i40e_aqc_opc_event_lan_overflow:
- default:
- break;
- }
-
- } while (result && (loop++ < IXL_ADM_LIMIT));
-
- free(event.msg_buf, M_DEVBUF);
-
- /*
- * If there are still messages to process, reschedule ourselves.
- * Otherwise, re-enable our interrupt and go to sleep.
- */
- if (result > 0)
- taskqueue_enqueue(pf->tq, &pf->adminq);
- else
- ixl_enable_adminq(hw);
-
- IXL_PF_UNLOCK(pf);
-}
-
-/**
- * Update VSI-specific ethernet statistics counters.
- **/
-void
-ixl_update_eth_stats(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_eth_stats *es;
- struct i40e_eth_stats *oes;
- struct i40e_hw_port_stats *nsd;
- u16 stat_idx = vsi->info.stat_counter_idx;
-
- es = &vsi->eth_stats;
- oes = &vsi->eth_stats_offsets;
- nsd = &pf->stats;
-
- /* Gather up the stats that the hw collects */
- ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_errors, &es->tx_errors);
- ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_discards, &es->rx_discards);
-
- ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
- I40E_GLV_GORCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_bytes, &es->rx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
- I40E_GLV_UPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_unicast, &es->rx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
- I40E_GLV_MPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_multicast, &es->rx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
- I40E_GLV_BPRCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->rx_broadcast, &es->rx_broadcast);
-
- ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
- I40E_GLV_GOTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_bytes, &es->tx_bytes);
- ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
- I40E_GLV_UPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_unicast, &es->tx_unicast);
- ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
- I40E_GLV_MPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_multicast, &es->tx_multicast);
- ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
- I40E_GLV_BPTCL(stat_idx),
- vsi->stat_offsets_loaded,
- &oes->tx_broadcast, &es->tx_broadcast);
- vsi->stat_offsets_loaded = true;
-}
-
-static void
-ixl_update_vsi_stats(struct ixl_vsi *vsi)
-{
- struct ixl_pf *pf;
- struct ifnet *ifp;
- struct i40e_eth_stats *es;
- u64 tx_discards;
-
- struct i40e_hw_port_stats *nsd;
-
- pf = vsi->back;
- ifp = vsi->ifp;
- es = &vsi->eth_stats;
- nsd = &pf->stats;
-
- ixl_update_eth_stats(vsi);
-
- tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
- for (int i = 0; i < vsi->num_queues; i++)
- tx_discards += vsi->queues[i].txr.br->br_drops;
-
- /* Update ifnet stats */
- IXL_SET_IPACKETS(vsi, es->rx_unicast +
- es->rx_multicast +
- es->rx_broadcast);
- IXL_SET_OPACKETS(vsi, es->tx_unicast +
- es->tx_multicast +
- es->tx_broadcast);
- IXL_SET_IBYTES(vsi, es->rx_bytes);
- IXL_SET_OBYTES(vsi, es->tx_bytes);
- IXL_SET_IMCASTS(vsi, es->rx_multicast);
- IXL_SET_OMCASTS(vsi, es->tx_multicast);
-
- IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
- nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
- nsd->rx_jabber);
- IXL_SET_OERRORS(vsi, es->tx_errors);
- IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
- IXL_SET_OQDROPS(vsi, tx_discards);
- IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
- IXL_SET_COLLISIONS(vsi, 0);
-}
-
-/**
- * Reset all of the stats for the given PF.
- **/
-void
-ixl_pf_reset_stats(struct ixl_pf *pf)
-{
- bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
- bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
- pf->stat_offsets_loaded = false;
-}
-
-/**
- * Reset all of the stats for the given VSI.
- **/
-void
-ixl_vsi_reset_stats(struct ixl_vsi *vsi)
-{
- bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
- bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
- vsi->stat_offsets_loaded = false;
-}
-
-/**
- * Read and update a 48 bit stat from the hw
- *
- * Since the device stats are not reset at PFReset, they likely will not
- * be zeroed when the driver starts. We'll save the first values read
- * and use them as offsets to be subtracted from the raw values in order
- * to report stats that count from zero.
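- *
- * Worked example (illustrative values only): if the first read returns
- * 0x1000, it is latched as the offset, so a later raw read of 0x1800
- * reports 0x800. If the 48-bit counter wraps (raw 0x0200 < offset
- * 0x1000), adding 2^48 before subtracting keeps the value monotonic.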
- **/
-static void
-ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
- bool offset_loaded, u64 *offset, u64 *stat)
-{
- u64 new_data;
-
-#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
- new_data = rd64(hw, loreg);
-#else
- /*
- * Use two rd32's instead of one rd64; FreeBSD versions before
- * 10 don't support 8 byte bus reads/writes.
- */
- new_data = rd32(hw, loreg);
- new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
-#endif
-
- if (!offset_loaded)
- *offset = new_data;
- if (new_data >= *offset)
- *stat = new_data - *offset;
- else
- *stat = (new_data + ((u64)1 << 48)) - *offset;
- *stat &= 0xFFFFFFFFFFFFULL;
-}
-
-/**
- * Read and update a 32 bit stat from the hw
- **/
-static void
-ixl_stat_update32(struct i40e_hw *hw, u32 reg,
- bool offset_loaded, u64 *offset, u64 *stat)
-{
- u32 new_data;
-
- new_data = rd32(hw, reg);
- if (!offset_loaded)
- *offset = new_data;
- if (new_data >= *offset)
- *stat = (u32)(new_data - *offset);
- else
- *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
-}
-
-static void
-ixl_add_device_sysctls(struct ixl_pf *pf)
-{
- device_t dev = pf->dev;
-
- /* Set up sysctls */
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
- pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_current_speed, "A", "Current Port Speed");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
-
-#if 0
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "rx_itr", CTLFLAG_RW,
- &ixl_rx_itr, 0, "RX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
- &ixl_dynamic_rx_itr, 0, "Dynamic RX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "tx_itr", CTLFLAG_RW,
- &ixl_tx_itr, 0, "TX ITR");
-
- SYSCTL_ADD_INT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
- &ixl_dynamic_tx_itr, 0, "Dynamic TX ITR");
-#endif
-
-#ifdef IXL_DEBUG_SYSCTL
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug", CTLTYPE_INT|CTLFLAG_RW, pf, 0,
- ixl_debug_info, "I", "Debug Information");
-
- /* Shared-code debug message level */
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "debug_mask", CTLFLAG_RW,
- &pf->hw.debug_mask, 0, "Debug Message Level");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
-
- SYSCTL_ADD_PROC(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
- pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
-
-#ifdef PCI_IOV
- SYSCTL_ADD_UINT(device_get_sysctl_ctx(dev),
- SYSCTL_CHILDREN(device_get_sysctl_tree(dev)),
- OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
- 0, "PF/VF Virtual Channel debug level");
-#endif
-#endif
-}
-
-/*
-** Set flow control using sysctl:
-** 0 - off
-** 1 - rx pause
-** 2 - tx pause
-** 3 - full
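-**
-** e.g. 'sysctl dev.ixl.0.fc=3' (assuming unit 0) requests full
-** flow control; the link partner may negotiate it down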
-*/
-static int
-ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
-{
- /*
- * TODO: Ensure the hardware appends the Tx CRC when Tx flow
- * control is enabled. (Not applicable to 40G ports.)
- */
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_fc, error = 0;
- enum i40e_status_code aq_error = 0;
- u8 fc_aq_err = 0;
-
- /* Get request */
- requested_fc = pf->fc;
- error = sysctl_handle_int(oidp, &requested_fc, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- if (requested_fc < 0 || requested_fc > 3) {
- device_printf(dev,
- "Invalid fc mode; valid modes are 0 through 3\n");
- return (EINVAL);
- }
-
- /* Set fc ability for port */
- hw->fc.requested_mode = requested_fc;
- aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
- if (aq_error) {
- device_printf(dev,
- "%s: Error setting new fc mode %d; fc_err %#x\n",
- __func__, aq_error, fc_aq_err);
- return (EIO);
- }
- pf->fc = requested_fc;
-
- /* Get new link state */
- i40e_msec_delay(250);
- hw->phy.get_link_info = TRUE;
- i40e_get_link_status(hw, &pf->link_up);
-
- return (0);
-}
-
-static int
-ixl_current_speed(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- int error = 0, index = 0;
-
- char *speeds[] = {
- "Unknown",
- "100M",
- "1G",
- "10G",
- "40G",
- "20G"
- };
-
- ixl_update_link_status(pf);
-
- switch (hw->phy.link_info.link_speed) {
- case I40E_LINK_SPEED_100MB:
- index = 1;
- break;
- case I40E_LINK_SPEED_1GB:
- index = 2;
- break;
- case I40E_LINK_SPEED_10GB:
- index = 3;
- break;
- case I40E_LINK_SPEED_40GB:
- index = 4;
- break;
- case I40E_LINK_SPEED_20GB:
- index = 5;
- break;
- case I40E_LINK_SPEED_UNKNOWN:
- default:
- index = 0;
- break;
- }
-
- error = sysctl_handle_string(oidp, speeds[index],
- strlen(speeds[index]), req);
- return (error);
-}
-
-static int
-ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct i40e_aq_get_phy_abilities_resp abilities;
- struct i40e_aq_set_phy_config config;
- enum i40e_status_code aq_error = 0;
-
- /* Get current capability information */
- aq_error = i40e_aq_get_phy_capabilities(hw,
- FALSE, FALSE, &abilities, NULL);
- if (aq_error) {
- device_printf(dev,
- "%s: Error getting phy capabilities %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
-
- /* Prepare new config */
- bzero(&config, sizeof(config));
- config.phy_type = abilities.phy_type;
- config.abilities = abilities.abilities
- | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
- config.eee_capability = abilities.eee_capability;
- config.eeer = abilities.eeer_val;
- config.low_power_ctrl = abilities.d3_lpan;
- /* Translate into aq cmd link_speed */
- if (speeds & 0x10)
- config.link_speed |= I40E_LINK_SPEED_40GB;
- if (speeds & 0x8)
- config.link_speed |= I40E_LINK_SPEED_20GB;
- if (speeds & 0x4)
- config.link_speed |= I40E_LINK_SPEED_10GB;
- if (speeds & 0x2)
- config.link_speed |= I40E_LINK_SPEED_1GB;
- if (speeds & 0x1)
- config.link_speed |= I40E_LINK_SPEED_100MB;
-
- /* Do aq command & restart link */
- aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
- if (aq_error) {
- device_printf(dev,
- "%s: Error setting new phy config %d,"
- " aq error: %d\n", __func__, aq_error,
- hw->aq.asq_last_status);
- return (EAGAIN);
- }
-
- /*
- ** This seems a bit heavy handed, but we
- ** need to get a reinit on some devices
- */
- IXL_PF_LOCK(pf);
- ixl_stop_locked(pf);
- ixl_init_locked(pf);
- IXL_PF_UNLOCK(pf);
-
- return (0);
-}
-
-/*
-** Control link advertise speed:
-** Flags:
-** 0x1 - advertise 100 Mb
-** 0x2 - advertise 1G
-** 0x4 - advertise 10G
-** 0x8 - advertise 20G
-** 0x10 - advertise 40G
-**
-** Set to 0 to disable link
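-**
-** e.g. a value of 0x6 advertises both 1G and 10G; the
-** device-specific checks below still apply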
-*/
-static int
-ixl_set_advertise(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- int requested_ls = 0;
- int error = 0;
-
- /* Read in new mode */
- requested_ls = pf->advertised_speed;
- error = sysctl_handle_int(oidp, &requested_ls, 0, req);
- if ((error) || (req->newptr == NULL))
- return (error);
- /* Check for sane value */
- if (requested_ls > 0x10) {
- device_printf(dev, "Invalid advertised speed; "
- "valid modes are 0x1 through 0x10\n");
- return (EINVAL);
- }
- /* Then check for validity based on adapter type */
- switch (hw->device_id) {
- case I40E_DEV_ID_10G_BASE_T:
- case I40E_DEV_ID_10G_BASE_T4:
- /* BaseT */
- if (requested_ls & ~(0x7)) {
- device_printf(dev,
- "Only 100M/1G/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_20G_KR2:
- case I40E_DEV_ID_20G_KR2_A:
- /* 20G */
- if (requested_ls & ~(0xE)) {
- device_printf(dev,
- "Only 1G/10G/20G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- case I40E_DEV_ID_KX_B:
- case I40E_DEV_ID_QSFP_A:
- case I40E_DEV_ID_QSFP_B:
- /* 40G */
- if (requested_ls & ~(0x10)) {
- device_printf(dev,
- "Only 40G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- default:
- /* 1G/10G */
- if (requested_ls & ~(0x6)) {
- device_printf(dev,
- "Only 1G/10G speeds supported on this device.\n");
- return (EINVAL);
- }
- break;
- }
-
- /* Exit if no change */
- if (pf->advertised_speed == requested_ls)
- return (0);
-
- error = ixl_set_advertised_speeds(pf, requested_ls);
- if (error)
- return (error);
-
- pf->advertised_speed = requested_ls;
- ixl_update_link_status(pf);
- return (0);
-}
-
-/*
-** Get the width and transaction speed of
-** the bus this adapter is plugged into.
-*/
-static u16
-ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
-{
- u16 link;
- u32 offset;
-
- /* Get the PCI Express Capabilities offset */
- pci_find_cap(dev, PCIY_EXPRESS, &offset);
-
- /* ...and read the Link Status Register */
- link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
-
- switch (link & I40E_PCI_LINK_WIDTH) {
- case I40E_PCI_LINK_WIDTH_1:
- hw->bus.width = i40e_bus_width_pcie_x1;
- break;
- case I40E_PCI_LINK_WIDTH_2:
- hw->bus.width = i40e_bus_width_pcie_x2;
- break;
- case I40E_PCI_LINK_WIDTH_4:
- hw->bus.width = i40e_bus_width_pcie_x4;
- break;
- case I40E_PCI_LINK_WIDTH_8:
- hw->bus.width = i40e_bus_width_pcie_x8;
- break;
- default:
- hw->bus.width = i40e_bus_width_unknown;
- break;
- }
-
- switch (link & I40E_PCI_LINK_SPEED) {
- case I40E_PCI_LINK_SPEED_2500:
- hw->bus.speed = i40e_bus_speed_2500;
- break;
- case I40E_PCI_LINK_SPEED_5000:
- hw->bus.speed = i40e_bus_speed_5000;
- break;
- case I40E_PCI_LINK_SPEED_8000:
- hw->bus.speed = i40e_bus_speed_8000;
- break;
- default:
- hw->bus.speed = i40e_bus_speed_unknown;
- break;
- }
-
- device_printf(dev,"PCI Express Bus: Speed %s %s\n",
- ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
- (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
- (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
- (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
- (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
- (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
- ("Unknown"));
-
- if ((hw->bus.width <= i40e_bus_width_pcie_x8) &&
- (hw->bus.speed < i40e_bus_speed_8000)) {
- device_printf(dev, "PCI-Express bandwidth available"
- " for this device may be insufficient for"
- " optimal performance.\n");
- device_printf(dev, "For expected performance an x8 "
- "PCIE Gen3 slot is required.\n");
- }
-
- return (link);
-}
-
-static int
-ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- struct sbuf *sbuf;
-
- sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- ixl_nvm_version_str(hw, sbuf);
- sbuf_finish(sbuf);
- sbuf_delete(sbuf);
-
- return 0;
-}
-
-#ifdef IXL_DEBUG
-static void
-ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
-{
- if ((nvma->command == I40E_NVM_READ) &&
- ((nvma->config & 0xFF) == 0xF) &&
- (((nvma->config & 0xF00) >> 8) == 0xF) &&
- (nvma->offset == 0) &&
- (nvma->data_size == 1)) {
- // Get Driver Status command; polled too often to be worth logging
- }
- else if (nvma->command == I40E_NVM_READ) {
- // Plain reads are routine; don't log them either
- }
- else {
- switch (nvma->command) {
- case 0xB:
- device_printf(dev, "- command: I40E_NVM_READ\n");
- break;
- case 0xC:
- device_printf(dev, "- command: I40E_NVM_WRITE\n");
- break;
- default:
- device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
- break;
- }
-
- device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
- device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
- device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
- device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
- }
-}
-#endif
-
-static int
-ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
-{
- struct i40e_hw *hw = &pf->hw;
- struct i40e_nvm_access *nvma;
- device_t dev = pf->dev;
- enum i40e_status_code status = 0;
- int perrno;
-
- DEBUGFUNC("ixl_handle_nvmupd_cmd");
-
- /* Sanity checks */
- if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
- ifd->ifd_data == NULL) {
- device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
- __func__);
- device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
- __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
- device_printf(dev, "%s: data pointer: %p\n", __func__,
- ifd->ifd_data);
- return (EINVAL);
- }
-
- nvma = (struct i40e_nvm_access *)ifd->ifd_data;
-
-#ifdef IXL_DEBUG
- ixl_print_nvm_cmd(dev, nvma);
-#endif
-
- if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
- int count = 0;
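- /* Wait up to ~10 seconds (100 x 100ms) for the reset to finish */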
- while (count++ < 100) {
- i40e_msec_delay(100);
- if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
- break;
- }
- }
-
- if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
- IXL_PF_LOCK(pf);
- status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
- IXL_PF_UNLOCK(pf);
- } else {
- perrno = -EBUSY;
- }
-
- if (status)
- device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
- status, perrno);
-
- /*
- * -EPERM is actually ERESTART, which the kernel interprets as a
- * request to restart this ioctl. Return -EACCES instead so the
- * error actually reaches the caller.
- */
- if (perrno == -EPERM)
- return (-EACCES);
- else
- return (perrno);
-}
-
-#ifdef IXL_DEBUG_SYSCTL
-static int
-ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- struct i40e_link_status link_status;
- char buf[512];
-
- enum i40e_status_code aq_error = 0;
-
- aq_error = i40e_aq_get_link_info(hw, TRUE, &link_status, NULL);
- if (aq_error) {
- printf("i40e_aq_get_link_info() error %d\n", aq_error);
- return (EPERM);
- }
-
- sprintf(buf, "\n"
- "PHY Type : %#04x\n"
- "Speed : %#04x\n"
- "Link info: %#04x\n"
- "AN info : %#04x\n"
- "Ext info : %#04x\n"
- "Max Frame: %d\n"
- "Pacing : %#04x\n"
- "CRC En? : %d",
- link_status.phy_type, link_status.link_speed,
- link_status.link_info, link_status.an_info,
- link_status.ext_info, link_status.max_frame_size,
- link_status.pacing, link_status.crc_enable);
-
- return (sysctl_handle_string(oidp, buf, strlen(buf), req));
-}
-
-static int
-ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- char buf[512];
- enum i40e_status_code aq_error = 0;
-
- struct i40e_aq_get_phy_abilities_resp abilities;
-
- aq_error = i40e_aq_get_phy_capabilities(hw,
- TRUE, FALSE, &abilities, NULL);
- if (aq_error) {
- printf("i40e_aq_get_phy_capabilities() error %d\n", aq_error);
- return (EPERM);
- }
-
- sprintf(buf, "\n"
- "PHY Type : %#010x\n"
- "Speed : %#04x\n"
- "Abilities: %#04x\n"
- "EEE cap : %#06x\n"
- "EEER reg : %#010x\n"
- "D3 Lpan : %#04x",
- abilities.phy_type, abilities.link_speed,
- abilities.abilities, abilities.eee_capability,
- abilities.eeer_val, abilities.d3_lpan);
-
- return (sysctl_handle_string(oidp, buf, strlen(buf), req));
-}
-
-static int
-ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_mac_filter *f;
- char *buf, *buf_i;
-
- int error = 0;
- int ftl_len = 0;
- int ftl_counter = 0;
- int buf_len = 0;
- int entry_len = 42;
-
- SLIST_FOREACH(f, &vsi->ftl, next) {
- ftl_len++;
- }
-
- if (ftl_len < 1) {
- sysctl_handle_string(oidp, "(none)", 6, req);
- return (0);
- }
-
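- /* One entry plus '\n' per filter, plus a leading newline and NUL */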
- buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
- buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
-
- sprintf(buf_i++, "\n");
- SLIST_FOREACH(f, &vsi->ftl, next) {
- sprintf(buf_i,
- MAC_FORMAT ", vlan %4d, flags %#06x",
- MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
- buf_i += entry_len;
- /* don't print '\n' for last entry */
- if (++ftl_counter != ftl_len) {
- sprintf(buf_i, "\n");
- buf_i++;
- }
- }
-
- error = sysctl_handle_string(oidp, buf, strlen(buf), req);
- if (error)
- printf("sysctl error: %d\n", error);
- free(buf, M_DEVBUF);
- return error;
-}
-
-#define IXL_SW_RES_SIZE 0x14
-static int
-ixl_res_alloc_cmp(const void *a, const void *b)
-{
- const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
- one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
- two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
-
- return ((int)one->resource_type - (int)two->resource_type);
-}
-
-/*
- * Longest string length: 25
- */
-static char *
-ixl_switch_res_type_string(u8 type)
-{
- static char *ixl_switch_res_type_strings[0x14] = {
- "VEB",
- "VSI",
- "Perfect Match MAC address",
- "S-tag",
- "(Reserved)",
- "Multicast hash entry",
- "Unicast hash entry",
- "VLAN",
- "VSI List entry",
- "(Reserved)",
- "VLAN Statistic Pool",
- "Mirror Rule",
- "Queue Set",
- "Inner VLAN Forward filter",
- "(Reserved)",
- "Inner MAC",
- "IP",
- "GRE/VN1 Key",
- "VN2 Key",
- "Tunneling Port"
- };
-
- if (type < 0x14)
- return ixl_switch_res_type_strings[type];
- else
- return "(Reserved)";
-}
-
-static int
-ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *buf;
- int error = 0;
-
- u8 num_entries;
- struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
-
- buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- if (!buf) {
- device_printf(dev, "Could not allocate sbuf for output.\n");
- return (ENOMEM);
- }
-
- bzero(resp, sizeof(resp));
- error = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
- resp,
- IXL_SW_RES_SIZE,
- NULL);
- if (error) {
- device_printf(dev,
- "%s: get_switch_resource_alloc() error %d, aq error %d\n",
- __func__, error, hw->aq.asq_last_status);
- sbuf_delete(buf);
- return error;
- }
-
- /* Sort entries by type for display */
- qsort(resp, num_entries,
- sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
- &ixl_res_alloc_cmp);
-
- sbuf_cat(buf, "\n");
- sbuf_printf(buf, "# of entries: %d\n", num_entries);
- sbuf_printf(buf,
- " Type | Guaranteed | Total | Used | Un-allocated\n"
- " | (this) | (all) | (this) | (all) \n");
- for (int i = 0; i < num_entries; i++) {
- sbuf_printf(buf,
- "%25s | %10d %5d %6d %12d",
- ixl_switch_res_type_string(resp[i].resource_type),
- resp[i].guaranteed,
- resp[i].total,
- resp[i].used,
- resp[i].total_unalloced);
- if (i < num_entries - 1)
- sbuf_cat(buf, "\n");
- }
-
- error = sbuf_finish(buf);
- if (error)
- device_printf(dev, "Error finishing sbuf: %d\n", error);
-
- sbuf_delete(buf);
- return error;
-}
-
-/*
-** Caller must init and delete sbuf; this function will clear and
-** finish it for caller.
-**
-** XXX: Cannot use the SEID for this, since there is no longer a
-** fixed mapping between SEID and element type.
-*/
-static char *
-ixl_switch_element_string(struct sbuf *s,
- struct i40e_aqc_switch_config_element_resp *element)
-{
- sbuf_clear(s);
-
- switch (element->element_type) {
- case I40E_AQ_SW_ELEM_TYPE_MAC:
- sbuf_printf(s, "MAC %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_PF:
- sbuf_printf(s, "PF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_VF:
- sbuf_printf(s, "VF %3d", element->element_info);
- break;
- case I40E_AQ_SW_ELEM_TYPE_EMP:
- sbuf_cat(s, "EMP");
- break;
- case I40E_AQ_SW_ELEM_TYPE_BMC:
- sbuf_cat(s, "BMC");
- break;
- case I40E_AQ_SW_ELEM_TYPE_PV:
- sbuf_cat(s, "PV");
- break;
- case I40E_AQ_SW_ELEM_TYPE_VEB:
- sbuf_cat(s, "VEB");
- break;
- case I40E_AQ_SW_ELEM_TYPE_PA:
- sbuf_cat(s, "PA");
- break;
- case I40E_AQ_SW_ELEM_TYPE_VSI:
- sbuf_printf(s, "VSI %3d", element->element_info);
- break;
- default:
- sbuf_cat(s, "?");
- break;
- }
-
- sbuf_finish(s);
- return sbuf_data(s);
-}
-
-static int
-ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf = (struct ixl_pf *)arg1;
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *buf;
- struct sbuf *nmbuf;
- int error = 0;
- u16 next = 0;
- u8 aq_buf[I40E_AQ_LARGE_BUF];
-
- struct i40e_aqc_get_switch_config_resp *sw_config;
- sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
-
- buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
- if (!buf) {
- device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
- return (ENOMEM);
- }
-
- error = i40e_aq_get_switch_config(hw, sw_config,
- sizeof(aq_buf), &next, NULL);
- if (error) {
- device_printf(dev,
- "%s: aq_get_switch_config() error %d, aq error %d\n",
- __func__, error, hw->aq.asq_last_status);
- sbuf_delete(buf);
- return error;
- }
- if (next)
- device_printf(dev, "%s: TODO: get more config with SEID %d\n",
- __func__, next);
-
- nmbuf = sbuf_new_auto();
- if (!nmbuf) {
- device_printf(dev, "Could not allocate sbuf for name output.\n");
- sbuf_delete(buf);
- return (ENOMEM);
- }
-
- sbuf_cat(buf, "\n");
- // Assuming <= 255 elements in switch
- sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
- sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
- /* Exclude:
- ** Revision -- all elements are revision 1 for now
- */
- sbuf_printf(buf,
- "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
- " | | | (uplink)\n");
- for (int i = 0; i < sw_config->header.num_reported; i++) {
- // "%4d (%8s) | %8s %8s %#8x",
- sbuf_printf(buf, "%4d", sw_config->element[i].seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
- &sw_config->element[i]));
- sbuf_cat(buf, " | ");
- sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
- sbuf_cat(buf, " ");
- sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
- if (i < sw_config->header.num_reported - 1)
- sbuf_cat(buf, "\n");
- }
- sbuf_delete(nmbuf);
-
- error = sbuf_finish(buf);
- if (error)
- device_printf(dev, "Error finishing sbuf: %d\n", error);
-
- sbuf_delete(buf);
-
- return (error);
-}
-
-static int
-ixl_debug_info(SYSCTL_HANDLER_ARGS)
-{
- struct ixl_pf *pf;
- int error, input = 0;
-
- error = sysctl_handle_int(oidp, &input, 0, req);
-
- if (error || !req->newptr)
- return (error);
-
- if (input == 1) {
- pf = (struct ixl_pf *)arg1;
- ixl_print_debug_info(pf);
- }
-
- return (error);
-}
-
-static void
-ixl_print_debug_info(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- struct ixl_vsi *vsi = &pf->vsi;
- struct ixl_queue *que = vsi->queues;
- struct rx_ring *rxr = &que->rxr;
- struct tx_ring *txr = &que->txr;
- u32 reg;
-
- printf("Queue irqs = %jx\n", (uintmax_t)que->irqs);
- printf("AdminQ irqs = %jx\n", (uintmax_t)pf->admin_irq);
- printf("RX next check = %x\n", rxr->next_check);
- printf("RX not ready = %jx\n", (uintmax_t)rxr->not_done);
- printf("RX packets = %jx\n", (uintmax_t)rxr->rx_packets);
- printf("TX desc avail = %x\n", txr->avail);
-
- reg = rd32(hw, I40E_GLV_GORCL(0xc));
- printf("RX Bytes = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_GORCL(hw->port));
- printf("Port RX Bytes = %x\n", reg);
- reg = rd32(hw, I40E_GLV_RDPC(0xc));
- printf("RX discard = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RDPC(hw->port));
- printf("Port RX discard = %x\n", reg);
-
- reg = rd32(hw, I40E_GLV_TEPC(0xc));
- printf("TX errors = %x\n", reg);
- reg = rd32(hw, I40E_GLV_GOTCL(0xc));
- printf("TX Bytes = %x\n", reg);
-
- reg = rd32(hw, I40E_GLPRT_RUC(hw->port));
- printf("RX undersize = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RFC(hw->port));
- printf("RX fragments = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_ROC(hw->port));
- printf("RX oversize = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_RLEC(hw->port));
- printf("RX length error = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_MRFC(hw->port));
- printf("mac remote fault = %x\n", reg);
- reg = rd32(hw, I40E_GLPRT_MLFC(hw->port));
- printf("mac local fault = %x\n", reg);
-}
-
-#endif /* IXL_DEBUG_SYSCTL */
-
-#ifdef PCI_IOV
-static int
-ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
- struct i40e_vsi_context vsi_ctx;
- int i;
- uint16_t first_queue;
- enum i40e_status_code code;
-
- hw = &pf->hw;
- vsi = &pf->vsi;
-
- vsi_ctx.pf_num = hw->pf_id;
- vsi_ctx.uplink_seid = pf->veb_seid;
- vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
- vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
- vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
-
- bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
-
- vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
- vsi_ctx.info.switch_id = htole16(0);
-
- vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
- vsi_ctx.info.sec_flags = 0;
- if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
- vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
-
- /* TODO: If a port VLAN is set, then this needs to be changed */
- vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
- vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
- I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
-
- vsi_ctx.info.valid_sections |=
- htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
- vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
- first_queue = vsi->num_queues + vf->vf_num * IXLV_MAX_QUEUES;
- for (i = 0; i < IXLV_MAX_QUEUES; i++)
- vsi_ctx.info.queue_mapping[i] = htole16(first_queue + i);
- for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
- vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
-
- vsi_ctx.info.tc_mapping[0] = htole16(
- (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
- (1 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
-
- code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
- if (code != I40E_SUCCESS)
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- vf->vsi.seid = vsi_ctx.seid;
- vf->vsi.vsi_num = vsi_ctx.vsi_number;
- vf->vsi.first_queue = first_queue;
- vf->vsi.num_queues = IXLV_MAX_QUEUES;
-
- code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
- if (code != I40E_SUCCESS)
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
-
- code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
- if (code != I40E_SUCCESS) {
- device_printf(pf->dev, "Failed to disable BW limit: %d\n",
- ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
- }
-
- memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
- return (0);
-}
-
-static int
-ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- int error;
-
- hw = &pf->hw;
-
- error = ixl_vf_alloc_vsi(pf, vf);
- if (error != 0)
- return (error);
-
- vf->vsi.hw_filters_add = 0;
- vf->vsi.hw_filters_del = 0;
- ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
- ixl_reconfigure_filters(&vf->vsi);
-
- return (0);
-}
-
-static void
-ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
- uint32_t val)
-{
- uint32_t qtable;
- int index, shift;
-
- /*
- * Two queues are mapped in a single register, so we have to do some
- * gymnastics to convert the queue number into a register index and
- * shift.
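- * For example, queue 5 lands in register index 2 at the odd-queue
- * (QINDEX_1) shift, while queue 4 uses the even-queue shift of 0.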
- */
- index = qnum / 2;
- shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
-
- qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
- qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
- qtable |= val << shift;
- i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
-}
-
-static void
-ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t qtable;
- int i;
-
- hw = &pf->hw;
-
- /*
- * Contiguous mappings aren't actually supported by the hardware,
- * so we have to use non-contiguous mappings.
- */
- i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
- I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
-
- wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
- I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
-
- for (i = 0; i < vf->vsi.num_queues; i++) {
- qtable = (vf->vsi.first_queue + i) <<
- I40E_VPLAN_QTABLE_QINDEX_SHIFT;
-
- wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
- }
-
- /* Map queues allocated to VF to its VSI. */
- for (i = 0; i < vf->vsi.num_queues; i++)
- ixl_vf_map_vsi_queue(hw, vf, i, vf->vsi.first_queue + i);
-
- /* Set rest of VSI queues as unused. */
- for (; i < IXL_MAX_VSI_QUEUES; i++)
- ixl_vf_map_vsi_queue(hw, vf, i,
- I40E_VSILAN_QTABLE_QINDEX_0_MASK);
-
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
-{
- struct i40e_hw *hw;
-
- hw = &pf->hw;
-
- if (vsi->seid == 0)
- return;
-
- i40e_aq_delete_element(hw, vsi->seid, NULL);
-}
-
-static void
-ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
-{
-
- wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
-{
-
- wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
- I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfint_reg, vpint_reg;
- int i;
-
- hw = &pf->hw;
-
- ixl_vf_vsi_release(pf, &vf->vsi);
-
- /* Index 0 has a special register. */
- ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
-
- for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
- vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
- ixl_vf_disable_queue_intr(hw, vfint_reg);
- }
-
- /* Index 0 has a special register. */
- ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
-
- for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
- vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
- ixl_vf_unregister_intr(hw, vpint_reg);
- }
-
- vf->vsi.num_queues = 0;
-}
-
-static int
-ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- int i;
- uint16_t global_vf_num;
- uint32_t ciad;
-
- hw = &pf->hw;
- global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
-
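- /*
- * Select the VF's PCIe Device Status register through the indirect
- * CIAA/CIAD register pair, then poll it until the Transactions
- * Pending bit clears or the timeout expires.
- */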
- wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
- (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
- for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
- ciad = rd32(hw, I40E_PF_PCI_CIAD);
- if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
- return (0);
- DELAY(1);
- }
-
- return (ETIMEDOUT);
-}
-
-static void
-ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfrtrig;
-
- hw = &pf->hw;
-
- vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
- vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
- ixl_flush(hw);
-
- ixl_reinit_vf(pf, vf);
-}
-
-static void
-ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_hw *hw;
- uint32_t vfrstat, vfrtrig;
- int i, error;
-
- hw = &pf->hw;
-
- error = ixl_flush_pcie(pf, vf);
- if (error != 0)
- device_printf(pf->dev,
- "Timed out waiting for PCIe activity to stop on VF-%d\n",
- vf->vf_num);
-
- for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
- DELAY(10);
-
- vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
- if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
- break;
- }
-
- if (i == IXL_VF_RESET_TIMEOUT)
- device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
-
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
-
- vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
- vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
- wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
-
- if (vf->vsi.seid != 0)
- ixl_disable_rings(&vf->vsi);
-
- ixl_vf_release_resources(pf, vf);
- ixl_vf_setup_vsi(pf, vf);
- ixl_vf_map_queues(pf, vf);
-
- wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
- ixl_flush(hw);
-}
-
-static const char *
-ixl_vc_opcode_str(uint16_t op)
-{
-
- switch (op) {
- case I40E_VIRTCHNL_OP_VERSION:
- return ("VERSION");
- case I40E_VIRTCHNL_OP_RESET_VF:
- return ("RESET_VF");
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- return ("GET_VF_RESOURCES");
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- return ("CONFIG_TX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- return ("CONFIG_RX_QUEUE");
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- return ("CONFIG_VSI_QUEUES");
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- return ("CONFIG_IRQ_MAP");
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- return ("ENABLE_QUEUES");
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- return ("DISABLE_QUEUES");
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- return ("ADD_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- return ("DEL_ETHER_ADDRESS");
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- return ("ADD_VLAN");
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- return ("DEL_VLAN");
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- return ("CONFIG_PROMISCUOUS_MODE");
- case I40E_VIRTCHNL_OP_GET_STATS:
- return ("GET_STATS");
- case I40E_VIRTCHNL_OP_FCOE:
- return ("FCOE");
- case I40E_VIRTCHNL_OP_EVENT:
- return ("EVENT");
- default:
- return ("UNKNOWN");
- }
-}
-
-static int
-ixl_vc_opcode_level(uint16_t opcode)
-{
- switch (opcode) {
- case I40E_VIRTCHNL_OP_GET_STATS:
- return (10);
- default:
- return (5);
- }
-}
-
-static void
-ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
- enum i40e_status_code status, void *msg, uint16_t len)
-{
- struct i40e_hw *hw;
- int global_vf_id;
-
- hw = &pf->hw;
- global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
-
- I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
- "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
- ixl_vc_opcode_str(op), op, status, vf->vf_num);
-
- i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
-}
-
-static void
-ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
-{
-
- ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
-}
-
-static void
-ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
- enum i40e_status_code status, const char *file, int line)
-{
-
- I40E_VC_DEBUG(pf, 1,
- "Sending NACK (op=%s[%d], err=%d) to VF-%d from %s:%d\n",
- ixl_vc_opcode_str(op), op, status, vf->vf_num, file, line);
- ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
-}
-
-static void
-ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_version_info reply;
-
- if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
- I40E_ERR_PARAM);
- return;
- }
-
- vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
-
- reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
- reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
- sizeof(reply));
-}
-
-static void
-ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
-
- if (msg_size != 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_reset_vf(pf, vf);
-
- /* No response to a reset message. */
-}
-
-static void
-ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vf_resource reply;
-
- if ((vf->version == 0 && msg_size != 0) ||
- (vf->version == 1 && msg_size != 4)) {
- device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
- " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
- vf->version);
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_ERR_PARAM);
- return;
- }
-
- bzero(&reply, sizeof(reply));
-
- if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
- reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
- I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
- else
- reply.vf_offload_flags = *(u32 *)msg;
-
- reply.num_vsis = 1;
- reply.num_queue_pairs = vf->vsi.num_queues;
- reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
- reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
- reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
- reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
- memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
-
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
- I40E_SUCCESS, &reply, sizeof(reply));
-}
-
-static int
-ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_txq_info *info)
-{
- struct i40e_hw *hw;
- struct i40e_hmc_obj_txq txq;
- uint16_t global_queue_num, global_vf_num;
- enum i40e_status_code status;
- uint32_t qtx_ctl;
-
- hw = &pf->hw;
- global_queue_num = vf->vsi.first_queue + info->queue_id;
- global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
- bzero(&txq, sizeof(txq));
-
- status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
-
- txq.head_wb_ena = info->headwb_enabled;
- txq.head_wb_addr = info->dma_headwb_addr;
- txq.qlen = info->ring_len;
- txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
- txq.rdylist_act = 0;
-
- status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
- (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
- (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
- wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
- ixl_flush(hw);
-
- return (0);
-}
-
-static int
-ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
- struct i40e_virtchnl_rxq_info *info)
-{
- struct i40e_hw *hw;
- struct i40e_hmc_obj_rxq rxq;
- uint16_t global_queue_num;
- enum i40e_status_code status;
-
- hw = &pf->hw;
- global_queue_num = vf->vsi.first_queue + info->queue_id;
- bzero(&rxq, sizeof(rxq));
-
- if (info->databuffer_size > IXL_VF_MAX_BUFFER)
- return (EINVAL);
-
- if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
- info->max_pkt_size < ETHER_MIN_LEN)
- return (EINVAL);
-
- if (info->splithdr_enabled) {
- if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
- return (EINVAL);
-
- rxq.hsplit_0 = info->rx_split_pos &
- (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
- I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
- rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
-
- rxq.dtype = 2; /* header split */
- }
-
- status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
- rxq.qlen = info->ring_len;
-
- rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
-
- rxq.dsize = 1;
- rxq.crcstrip = 1;
- rxq.l2tsel = 1;
-
- rxq.rxmax = info->max_pkt_size;
- rxq.tphrdesc_ena = 1;
- rxq.tphwdesc_ena = 1;
- rxq.tphdata_ena = 1;
- rxq.tphhead_ena = 1;
- rxq.lrxqthresh = 2;
- rxq.prefena = 1;
-
- status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
- if (status != I40E_SUCCESS)
- return (EINVAL);
-
- return (0);
-}
-
-static void
-ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vsi_queue_config_info *info;
- struct i40e_virtchnl_queue_pair_info *pair;
- int i;
-
- if (msg_size < sizeof(*info)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- info = msg;
- if (info->num_queue_pairs == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- if (msg_size != sizeof(*info) + info->num_queue_pairs * sizeof(*pair)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- if (info->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < info->num_queue_pairs; i++) {
- pair = &info->qpair[i];
-
- if (pair->txq.vsi_id != vf->vsi.vsi_num ||
- pair->rxq.vsi_id != vf->vsi.vsi_num ||
- pair->txq.queue_id != pair->rxq.queue_id ||
- pair->txq.queue_id >= vf->vsi.num_queues) {
-
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
-
- if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
-
- if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
- return;
- }
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
-}
-
-static void
-ixl_vf_set_qctl(struct ixl_pf *pf,
- const struct i40e_virtchnl_vector_map *vector,
- enum i40e_queue_type cur_type, uint16_t cur_queue,
- enum i40e_queue_type *last_type, uint16_t *last_queue)
-{
- uint32_t offset, qctl;
- uint16_t itr_indx;
-
- if (cur_type == I40E_QUEUE_TYPE_RX) {
- offset = I40E_QINT_RQCTL(cur_queue);
- itr_indx = vector->rxitr_idx;
- } else {
- offset = I40E_QINT_TQCTL(cur_queue);
- itr_indx = vector->txitr_idx;
- }
-
- qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
- (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
- (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
- I40E_QINT_RQCTL_CAUSE_ENA_MASK |
- (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
-
- wr32(&pf->hw, offset, qctl);
-
- *last_type = cur_type;
- *last_queue = cur_queue;
-}
-
-static void
-ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
- const struct i40e_virtchnl_vector_map *vector)
-{
- struct i40e_hw *hw;
- u_int qindex;
- enum i40e_queue_type type, last_type;
- uint32_t lnklst_reg;
- uint16_t rxq_map, txq_map, cur_queue, last_queue;
-
- hw = &pf->hw;
-
- rxq_map = vector->rxq_map;
- txq_map = vector->txq_map;
-
- last_queue = IXL_END_OF_INTR_LNKLST;
- last_type = I40E_QUEUE_TYPE_RX;
-
- /*
- * The datasheet says that, to optimize performance, RX and TX
- * queues should be interleaved in the interrupt linked list, so
- * we process both at once here.
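- * (e.g. for a vector mapping queue pairs 0 and 1, the TX and RX
- * queues alternate in the list: TX0, RX0, TX1, RX1.)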
- */
- while ((rxq_map != 0) || (txq_map != 0)) {
- if (txq_map != 0) {
- qindex = ffs(txq_map) - 1;
- type = I40E_QUEUE_TYPE_TX;
- cur_queue = vf->vsi.first_queue + qindex;
- ixl_vf_set_qctl(pf, vector, type, cur_queue,
- &last_type, &last_queue);
- txq_map &= ~(1 << qindex);
- }
-
- if (rxq_map != 0) {
- qindex = ffs(rxq_map) - 1;
- type = I40E_QUEUE_TYPE_RX;
- cur_queue = vf->vsi.first_queue + qindex;
- ixl_vf_set_qctl(pf, vector, type, cur_queue,
- &last_type, &last_queue);
- rxq_map &= ~(1 << qindex);
- }
- }
-
- if (vector->vector_id == 0)
- lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
- else
- lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
- vf->vf_num);
- wr32(hw, lnklst_reg,
- (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
- (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
-
- ixl_flush(hw);
-}
-
-static void
-ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_irq_map_info *map;
- struct i40e_virtchnl_vector_map *vector;
- struct i40e_hw *hw;
- int i, largest_txq, largest_rxq;
-
- hw = &pf->hw;
-
- if (msg_size < sizeof(*map)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- map = msg;
- if (map->num_vectors == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < map->num_vectors; i++) {
- vector = &map->vecmap[i];
-
- if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
- vector->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
- return;
- }
-
- if (vector->rxq_map != 0) {
- largest_rxq = fls(vector->rxq_map) - 1;
- if (largest_rxq >= vf->vsi.num_queues) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (vector->txq_map != 0) {
- largest_txq = fls(vector->txq_map) - 1;
- if (largest_txq >= vf->vsi.num_queues) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
- vector->txitr_idx > IXL_MAX_ITR_IDX) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_vf_config_vector(pf, vf, vector);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
-}
-
-static void
-ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *select;
- int error;
-
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- select = msg;
- if (select->vsi_id != vf->vsi.vsi_num ||
- select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- error = ixl_enable_rings(&vf->vsi);
- if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
- I40E_ERR_TIMEOUT);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
-}
-
-static void
-ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
- void *msg, uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *select;
- int error;
-
- if (msg_size != sizeof(*select)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- select = msg;
- if (select->vsi_id != vf->vsi.vsi_num ||
- select->rx_queues == 0 || select->tx_queues == 0) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_PARAM);
- return;
- }
-
- error = ixl_disable_rings(&vf->vsi);
- if (error) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
- I40E_ERR_TIMEOUT);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
-}
-
-static boolean_t
-ixl_zero_mac(const uint8_t *addr)
-{
- uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
-
- return (cmp_etheraddr(addr, zero));
-}
-
-static boolean_t
-ixl_bcast_mac(const uint8_t *addr)
-{
-
- return (cmp_etheraddr(addr, ixl_bcast_addr));
-}
-
-static int
-ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
-{
-
- if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
- return (EINVAL);
-
- /*
- * If the VF is not allowed to change its MAC address, don't let it
- * set a MAC filter for an address that is not a multicast address and
- * is not its assigned MAC.
- */
- if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
- !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
- return (EPERM);
-
- return (0);
-}
-
-static void
-ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
- struct ixl_vsi *vsi;
- int i;
- size_t expected_size;
-
- vsi = &vf->vsi;
-
- if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
-
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vsi->vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
- return;
- }
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
-}
-
-static void
-ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_ether_addr_list *addr_list;
- struct i40e_virtchnl_ether_addr *addr;
- size_t expected_size;
- int i;
-
- if (msg_size < sizeof(*addr_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- addr_list = msg;
- expected_size = sizeof(*addr_list) +
- addr_list->num_elements * sizeof(*addr);
-
- if (addr_list->num_elements == 0 ||
- addr_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
- return;
- }
- }
-
- for (i = 0; i < addr_list->num_elements; i++) {
- addr = &addr_list->list[i];
- ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
-}
-
-static enum i40e_status_code
-ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
-{
- struct i40e_vsi_context vsi_ctx;
-
- vsi_ctx.seid = vf->vsi.seid;
-
- bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
- vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
- vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
- I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
- return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
-}
-
-static void
-ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
- enum i40e_status_code code;
- size_t expected_size;
- int i;
-
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++) {
- if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- code = ixl_vf_enable_vlan_strip(pf, vf);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- }
-
- for (i = 0; i < filter_list->num_elements; i++)
- ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
-}
-
-static void
-ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_vlan_filter_list *filter_list;
- int i;
- size_t expected_size;
-
- if (msg_size < sizeof(*filter_list)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- filter_list = msg;
- expected_size = sizeof(*filter_list) +
- filter_list->num_elements * sizeof(uint16_t);
- if (filter_list->num_elements == 0 ||
- filter_list->vsi_id != vf->vsi.vsi_num ||
- msg_size != expected_size) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++) {
- if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
- }
-
- if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
- I40E_ERR_PARAM);
- return;
- }
-
- for (i = 0; i < filter_list->num_elements; i++)
- ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
-}
-
-static void
-ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
- void *msg, uint16_t msg_size)
-{
- struct i40e_virtchnl_promisc_info *info;
- enum i40e_status_code code;
-
- if (msg_size != sizeof(*info)) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- info = msg;
- if (info->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
- return;
- }
-
- code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
- return;
- }
-
- code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
- info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
- if (code != I40E_SUCCESS) {
- i40e_send_vf_nack(pf, vf,
- I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
- return;
- }
-
- ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
-}
-
-static void
-ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
- uint16_t msg_size)
-{
- struct i40e_virtchnl_queue_select *queue;
-
- if (msg_size != sizeof(*queue)) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_ERR_PARAM);
- return;
- }
-
- queue = msg;
- if (queue->vsi_id != vf->vsi.vsi_num) {
- i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_ERR_PARAM);
- return;
- }
-
- ixl_update_eth_stats(&vf->vsi);
-
- ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
- I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
-}
-
-static void
-ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
-{
- struct ixl_vf *vf;
- void *msg;
- uint16_t vf_num, msg_size;
- uint32_t opcode;
-
- vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
- opcode = le32toh(event->desc.cookie_high);
-
- if (vf_num >= pf->num_vfs) {
- device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
- return;
- }
-
- vf = &pf->vfs[vf_num];
- msg = event->msg_buf;
- msg_size = event->msg_len;
-
- I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
- "Got msg %s(%d) from VF-%d of size %d\n",
- ixl_vc_opcode_str(opcode), opcode, vf_num, msg_size);
-
- switch (opcode) {
- case I40E_VIRTCHNL_OP_VERSION:
- ixl_vf_version_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_RESET_VF:
- ixl_vf_reset_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
- ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
- ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
- ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
- ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
- ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
- ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_ADD_VLAN:
- ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_DEL_VLAN:
- ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
- ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
- break;
- case I40E_VIRTCHNL_OP_GET_STATS:
- ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
- break;
-
- /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
- case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
- case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
- default:
- i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
- break;
- }
-}
-
-/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
-static void
-ixl_handle_vflr(void *arg, int pending)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- uint16_t global_vf_num;
- uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
- int i;
-
- pf = arg;
- hw = &pf->hw;
-
- IXL_PF_LOCK(pf);
- for (i = 0; i < pf->num_vfs; i++) {
- global_vf_num = hw->func_caps.vf_base_id + i;
-
- vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
- vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
- vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
- if (vflrstat & vflrstat_mask) {
- wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
- vflrstat_mask);
-
- ixl_reinit_vf(pf, &pf->vfs[i]);
- }
- }
-
- icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
- icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
- wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
- ixl_flush(hw);
-
- IXL_PF_UNLOCK(pf);
-}
-
-static int
-ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
-{
-
- switch (err) {
- case I40E_AQ_RC_EPERM:
- return (EPERM);
- case I40E_AQ_RC_ENOENT:
- return (ENOENT);
- case I40E_AQ_RC_ESRCH:
- return (ESRCH);
- case I40E_AQ_RC_EINTR:
- return (EINTR);
- case I40E_AQ_RC_EIO:
- return (EIO);
- case I40E_AQ_RC_ENXIO:
- return (ENXIO);
- case I40E_AQ_RC_E2BIG:
- return (E2BIG);
- case I40E_AQ_RC_EAGAIN:
- return (EAGAIN);
- case I40E_AQ_RC_ENOMEM:
- return (ENOMEM);
- case I40E_AQ_RC_EACCES:
- return (EACCES);
- case I40E_AQ_RC_EFAULT:
- return (EFAULT);
- case I40E_AQ_RC_EBUSY:
- return (EBUSY);
- case I40E_AQ_RC_EEXIST:
- return (EEXIST);
- case I40E_AQ_RC_EINVAL:
- return (EINVAL);
- case I40E_AQ_RC_ENOTTY:
- return (ENOTTY);
- case I40E_AQ_RC_ENOSPC:
- return (ENOSPC);
- case I40E_AQ_RC_ENOSYS:
- return (ENOSYS);
- case I40E_AQ_RC_ERANGE:
- return (ERANGE);
- case I40E_AQ_RC_EFLUSHED:
- return (EINVAL); /* No exact equivalent in errno.h */
- case I40E_AQ_RC_BAD_ADDR:
- return (EFAULT);
- case I40E_AQ_RC_EMODE:
- return (EPERM);
- case I40E_AQ_RC_EFBIG:
- return (EFBIG);
- default:
- return (EINVAL);
- }
-}
-
-static int
-ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- struct ixl_vsi *pf_vsi;
- enum i40e_status_code ret;
- int i, error;
-
- pf = device_get_softc(dev);
- hw = &pf->hw;
- pf_vsi = &pf->vsi;
-
- IXL_PF_LOCK(pf);
- pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
- M_ZERO);
-
- if (pf->vfs == NULL) {
- error = ENOMEM;
- goto fail;
- }
-
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_init(&pf->vfs[i].ctx);
-
- ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
- 1, FALSE, &pf->veb_seid, FALSE, NULL);
- if (ret != I40E_SUCCESS) {
- error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
- device_printf(dev, "add_veb failed; code=%d error=%d", ret,
- error);
- goto fail;
- }
-
- // TODO: [Configure MSI-X here]
- ixl_enable_adminq(hw);
-
- pf->num_vfs = num_vfs;
- IXL_PF_UNLOCK(pf);
- return (0);
-
-fail:
- free(pf->vfs, M_IXL);
- pf->vfs = NULL;
- IXL_PF_UNLOCK(pf);
- return (error);
-}
-
-static void
-ixl_iov_uninit(device_t dev)
-{
- struct ixl_pf *pf;
- struct i40e_hw *hw;
- struct ixl_vsi *vsi;
- struct ifnet *ifp;
- struct ixl_vf *vfs;
- int i, num_vfs;
-
- pf = device_get_softc(dev);
- hw = &pf->hw;
- vsi = &pf->vsi;
- ifp = vsi->ifp;
-
- IXL_PF_LOCK(pf);
- for (i = 0; i < pf->num_vfs; i++) {
- if (pf->vfs[i].vsi.seid != 0)
- i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
- }
-
- if (pf->veb_seid != 0) {
- i40e_aq_delete_element(hw, pf->veb_seid, NULL);
- pf->veb_seid = 0;
- }
-
- if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0)
- ixl_disable_intr(vsi);
-
- vfs = pf->vfs;
- num_vfs = pf->num_vfs;
-
- pf->vfs = NULL;
- pf->num_vfs = 0;
- IXL_PF_UNLOCK(pf);
-
- /* Do this after the unlock as sysctl_ctx_free might sleep. */
- for (i = 0; i < num_vfs; i++)
- sysctl_ctx_free(&vfs[i].ctx);
- free(vfs, M_IXL);
-}
-
-static int
-ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
-{
- char sysctl_name[QUEUE_NAME_LEN];
- struct ixl_pf *pf;
- struct ixl_vf *vf;
- const void *mac;
- size_t size;
- int error;
-
- pf = device_get_softc(dev);
- vf = &pf->vfs[vfnum];
-
- IXL_PF_LOCK(pf);
- vf->vf_num = vfnum;
-
- vf->vsi.back = pf;
- vf->vf_flags = VF_FLAG_ENABLED;
- SLIST_INIT(&vf->vsi.ftl);
-
- error = ixl_vf_setup_vsi(pf, vf);
- if (error != 0)
- goto out;
-
- if (nvlist_exists_binary(params, "mac-addr")) {
- mac = nvlist_get_binary(params, "mac-addr", &size);
- bcopy(mac, vf->mac, ETHER_ADDR_LEN);
-
- if (nvlist_get_bool(params, "allow-set-mac"))
- vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
- } else
- /*
- * If the administrator has not specified a MAC address then
- * we must allow the VF to choose one.
- */
- vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
-
- if (nvlist_get_bool(params, "mac-anti-spoof"))
- vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
-
- if (nvlist_get_bool(params, "allow-promisc"))
- vf->vf_flags |= VF_FLAG_PROMISC_CAP;
-
- /* TODO: Get VLAN that PF has set for the VF */
-
- vf->vf_flags |= VF_FLAG_VLAN_CAP;
-
- ixl_reset_vf(pf, vf);
-out:
- IXL_PF_UNLOCK(pf);
- if (error == 0) {
- snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
- ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
- }
-
- return (error);
-}
-#endif /* PCI_IOV */
diff --git a/sys/dev/ixl/if_ixlv.c b/sys/dev/ixl/if_ixlv.c
index 9be8a36e8816..9c70d79816c0 100644
--- a/sys/dev/ixl/if_ixlv.c
+++ b/sys/dev/ixl/if_ixlv.c
@@ -32,23 +32,13 @@
******************************************************************************/
/*$FreeBSD$*/
-#ifndef IXL_STANDALONE_BUILD
-#include "opt_inet.h"
-#include "opt_inet6.h"
-#include "opt_rss.h"
-#endif
-
#include "ixl.h"
#include "ixlv.h"
-#ifdef RSS
-#include <net/rss_config.h>
-#endif
-
/*********************************************************************
* Driver version
*********************************************************************/
-char ixlv_driver_version[] = "1.2.11-k";
+char ixlv_driver_version[] = "1.4.6-k";
/*********************************************************************
* PCI Device ID Table
@@ -64,6 +54,9 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
{
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF, 0, 0, 0},
{I40E_INTEL_VENDOR_ID, I40E_DEV_ID_VF_HV, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_A0_VF, 0, 0, 0},
+ {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_X722_VF_HV, 0, 0, 0},
/* required last entry */
{0, 0, 0, 0, 0}
};
@@ -73,7 +66,7 @@ static ixl_vendor_info_t ixlv_vendor_info_array[] =
*********************************************************************/
static char *ixlv_strings[] = {
- "Intel(R) Ethernet Connection XL710 VF Driver"
+ "Intel(R) Ethernet Connection XL710/X722 VF Driver"
};
@@ -119,6 +112,7 @@ static void ixlv_set_queue_rx_itr(struct ixl_queue *);
static void ixlv_set_queue_tx_itr(struct ixl_queue *);
static void ixl_init_cmd_complete(struct ixl_vc_cmd *, void *,
enum i40e_status_code);
+static void ixlv_configure_itr(struct ixlv_sc *);
static void ixlv_enable_adminq_irq(struct i40e_hw *);
static void ixlv_disable_adminq_irq(struct i40e_hw *);
@@ -137,8 +131,10 @@ static void ixlv_cap_txcsum_tso(struct ixl_vsi *,
struct ifnet *, int);
static void ixlv_add_sysctls(struct ixlv_sc *);
+#ifdef IXL_DEBUG
static int ixlv_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS);
static int ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS);
+#endif
/*********************************************************************
* FreeBSD Device Interface Entry Points
@@ -306,7 +302,7 @@ ixlv_attach(device_t dev)
/* Allocate filter lists */
ixlv_init_filters(sc);
- /* Core Lock Init*/
+ /* Core Lock Init */
mtx_init(&sc->mtx, device_get_nameunit(dev),
"IXL SC Lock", MTX_DEF);
@@ -365,14 +361,16 @@ ixlv_attach(device_t dev)
goto err_aq;
}
- INIT_DBG_DEV(dev, "VF config from PF:");
- INIT_DBG_DEV(dev, "VSIs %d, Queues %d, Max Vectors %d, Max MTU %d",
+ device_printf(dev, "VSIs %d, QPs %d, MSIX %d, RSS sizes: key %d lut %d\n",
sc->vf_res->num_vsis,
sc->vf_res->num_queue_pairs,
sc->vf_res->max_vectors,
- sc->vf_res->max_mtu);
- INIT_DBG_DEV(dev, "Offload flags: %#010x",
- sc->vf_res->vf_offload_flags);
+ sc->vf_res->rss_key_size,
+ sc->vf_res->rss_lut_size);
+#ifdef IXL_DEBUG
+ device_printf(dev, "Offload flags: 0x%b\n",
+ sc->vf_res->vf_offload_flags, IXLV_PRINTF_VF_OFFLOAD_FLAGS);
+#endif
/* got VF config message back from PF, now we can parse it */
for (int i = 0; i < sc->vf_res->num_vsis; i++) {
@@ -396,6 +394,14 @@ ixlv_attach(device_t dev)
bcopy(addr, hw->mac.addr, sizeof(addr));
}
+ /* Now that the number of queues for this VF is known, set up interrupts */
+ sc->msix = ixlv_init_msix(sc);
+ /* We fail without MSIX support */
+ if (sc->msix == 0) {
+ error = ENXIO;
+ goto err_res_buf;
+ }
+
vsi->id = sc->vsi_res->vsi_id;
vsi->back = (void *)sc;
sc->link_up = TRUE;
@@ -419,7 +425,12 @@ ixlv_attach(device_t dev)
INIT_DBG_DEV(dev, "Queue memory and interface setup");
/* Do queue interrupt setup */
- ixlv_assign_msix(sc);
+ if (ixlv_assign_msix(sc) != 0) {
+ device_printf(dev, "%s: allocating queue interrupts failed!\n",
+ __func__);
+ error = ENXIO;
+ goto out;
+ }
/* Start AdminQ taskqueue */
ixlv_init_taskqueue(sc);
@@ -829,8 +840,8 @@ ixl_init_cmd_complete(struct ixl_vc_cmd *cmd, void *arg,
*/
if (code != I40E_SUCCESS && code != I40E_ERR_ADAPTER_STOPPED) {
if_printf(sc->vsi.ifp,
- "Error %d waiting for PF to complete operation %d\n",
- code, cmd->request);
+ "Error %s waiting for PF to complete operation %d\n",
+ i40e_stat_str(&sc->hw, code), cmd->request);
}
}
@@ -901,6 +912,9 @@ ixlv_init_locked(struct ixlv_sc *sc)
ixl_init_rx_ring(que);
}
+ /* Set initial ITR values */
+ ixlv_configure_itr(sc);
+
/* Configure queues */
ixl_vc_enqueue(&sc->vc_mgr, &sc->config_queues_cmd,
IXLV_FLAG_AQ_CONFIGURE_QUEUES, ixl_init_cmd_complete, sc);
@@ -936,18 +950,31 @@ ixlv_init(void *arg)
struct ixlv_sc *sc = vsi->back;
int retries = 0;
+ /* Prevent init from running again while waiting for AQ calls
+ * made in init_locked() to complete. */
mtx_lock(&sc->mtx);
+ if (sc->init_in_progress) {
+ mtx_unlock(&sc->mtx);
+ return;
+ } else
+ sc->init_in_progress = true;
+
ixlv_init_locked(sc);
mtx_unlock(&sc->mtx);
/* Wait for init_locked to finish */
while (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING)
&& ++retries < IXLV_AQ_MAX_ERR) {
- i40e_msec_delay(25);
+ i40e_msec_pause(25);
}
- if (retries >= IXLV_AQ_MAX_ERR)
+ if (retries >= IXLV_AQ_MAX_ERR) {
if_printf(vsi->ifp,
"Init failed to complete in allotted time!\n");
+ }
+
+ mtx_lock(&sc->mtx);
+ sc->init_in_progress = false;
+ mtx_unlock(&sc->mtx);
}
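/*
 * A reduced userland sketch of the re-entry guard used above, with a
 * pthread mutex standing in for sc->mtx; the names here are
 * illustrative, not from the driver.
 */
#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t init_mtx = PTHREAD_MUTEX_INITIALIZER;
static bool init_in_progress = false;

static void
guarded_init(void (*init_locked)(void), void (*wait_for_done)(void))
{
	pthread_mutex_lock(&init_mtx);
	if (init_in_progress) {			/* someone else is mid-init */
		pthread_mutex_unlock(&init_mtx);
		return;
	}
	init_in_progress = true;
	init_locked();				/* runs with the lock held */
	pthread_mutex_unlock(&init_mtx);

	wait_for_done();			/* poll without the lock */

	pthread_mutex_lock(&init_mtx);
	init_in_progress = false;
	pthread_mutex_unlock(&init_mtx);
}

static void noop(void) { }

int
main(void)
{
	guarded_init(noop, noop);
	return (0);
}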
/*
@@ -990,8 +1017,8 @@ ixlv_setup_vc(struct ixlv_sc *sc)
/* Need to set these AQ parameters before initializing AQ */
hw->aq.num_arq_entries = IXL_AQ_LEN;
hw->aq.num_asq_entries = IXL_AQ_LEN;
- hw->aq.arq_buf_size = IXL_AQ_BUFSZ;
- hw->aq.asq_buf_size = IXL_AQ_BUFSZ;
+ hw->aq.arq_buf_size = IXL_AQ_BUF_SZ;
+ hw->aq.asq_buf_size = IXL_AQ_BUF_SZ;
for (int i = 0; i < IXLV_AQ_MAX_ERR; i++) {
/* Initialize admin queue */
@@ -1021,13 +1048,13 @@ retry_send:
while (!i40e_asq_done(hw)) {
if (++asq_retries > IXLV_AQ_MAX_ERR) {
i40e_shutdown_adminq(hw);
- DDPRINTF(dev, "Admin Queue timeout "
- "(waiting for send_api_ver), %d more retries...",
+ device_printf(dev, "Admin Queue timeout "
+ "(waiting for send_api_ver), %d more tries...\n",
IXLV_AQ_MAX_ERR - (i + 1));
ret_error = 3;
break;
}
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
}
if (asq_retries > IXLV_AQ_MAX_ERR)
continue;
@@ -1055,7 +1082,7 @@ retry_send:
if (error) {
device_printf(dev,
"%s: Unable to verify API version,"
- " error %d\n", __func__, error);
+ " error %s\n", __func__, i40e_stat_str(hw, error));
ret_error = 5;
}
break;
@@ -1096,7 +1123,7 @@ retry_config:
ret_error = 3;
goto fail;
}
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
}
INIT_DBG_DEV(dev, "Sent VF config message to PF, attempt %d",
@@ -1149,6 +1176,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
{
device_t dev = sc->dev;
int rid, want, vectors, queues, available;
+ int auto_max_queues;
rid = PCIR_BAR(IXL_BAR);
sc->msix_mem = bus_alloc_resource_any(dev,
@@ -1156,7 +1184,7 @@ ixlv_init_msix(struct ixlv_sc *sc)
if (!sc->msix_mem) {
/* May not be enabled */
device_printf(sc->dev,
- "Unable to map MSIX table \n");
+ "Unable to map MSIX table\n");
goto fail;
}
@@ -1168,20 +1196,30 @@ ixlv_init_msix(struct ixlv_sc *sc)
goto fail;
}
- /* Figure out a reasonable auto config value */
- queues = (mp_ncpus > (available - 1)) ? (available - 1) : mp_ncpus;
+ /* Clamp queues to number of CPUs and # of MSI-X vectors available */
+ auto_max_queues = min(mp_ncpus, available - 1);
+ /* Clamp queues to # assigned to VF by PF */
+ auto_max_queues = min(auto_max_queues, sc->vf_res->num_queue_pairs);
- /* Override with hardcoded value if sane */
- if ((ixlv_max_queues != 0) && (ixlv_max_queues <= queues))
+ /* Override with tunable value if tunable is less than autoconfig count */
+ if ((ixlv_max_queues != 0) && (ixlv_max_queues <= auto_max_queues))
queues = ixlv_max_queues;
+ /* Use autoconfig amount if that's lower */
+ else if ((ixlv_max_queues != 0) && (ixlv_max_queues > auto_max_queues)) {
+ device_printf(dev, "ixlv_max_queues (%d) is too large, using "
+ "autoconfig amount (%d)...\n",
+ ixlv_max_queues, auto_max_queues);
+ queues = auto_max_queues;
+ }
+ /* Limit maximum auto-configured queues to 8 if no user value is set */
+ else
+ queues = min(auto_max_queues, 8);
+
#ifdef RSS
/* If we're doing RSS, clamp at the number of RSS buckets */
if (queues > rss_getnumbuckets())
queues = rss_getnumbuckets();
#endif
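/*
 * A standalone sketch of the queue-count clamping above; the helper
 * name and the sample values are hypothetical.
 */
#include <stdio.h>

static int
clamp_queues(int ncpus, int available, int vf_pairs, int tunable)
{
	int auto_max = (ncpus < available - 1) ? ncpus : available - 1;

	if (auto_max > vf_pairs)
		auto_max = vf_pairs;
	if (tunable != 0 && tunable <= auto_max)
		return (tunable);		/* sane override wins */
	if (tunable != 0)
		return (auto_max);		/* tunable too large: autoconfig */
	return (auto_max < 8 ? auto_max : 8);	/* default cap of 8 */
}

int
main(void)
{
	/* 8 CPUs, 5 vectors, 4 VF queue pairs, tunable unset -> 4 */
	printf("%d queues\n", clamp_queues(8, 5, 4, 0));
	return (0);
}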
- /* Enforce the VF max value */
- if (queues > IXLV_MAX_QUEUES)
- queues = IXLV_MAX_QUEUES;
/*
** Want one vector (RX/TX pair) per queue
@@ -1225,25 +1263,6 @@ ixlv_init_msix(struct ixlv_sc *sc)
sc->vsi.num_queues = queues;
}
- /*
- ** Explicitly set the guest PCI BUSMASTER capability
- ** and we must rewrite the ENABLE in the MSIX control
- ** register again at this point to cause the host to
- ** successfully initialize us.
- */
- {
- u16 pci_cmd_word;
- int msix_ctrl;
- pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
- pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
- pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
- pci_find_cap(dev, PCIY_MSIX, &rid);
- rid += PCIR_MSIX_CTRL;
- msix_ctrl = pci_read_config(dev, rid, 2);
- msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
- pci_write_config(dev, rid, msix_ctrl, 2);
- }
-
/* Next we need to setup the vector for the Admin Queue */
rid = 1; // zero vector + 1
sc->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
@@ -1280,7 +1299,7 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
&rid, RF_ACTIVE);
if (!(sc->pci_mem)) {
- device_printf(dev,"Unable to allocate bus resource: memory\n");
+ device_printf(dev, "Unable to allocate bus resource: memory\n");
return (ENXIO);
}
@@ -1294,18 +1313,29 @@ ixlv_allocate_pci_resources(struct ixlv_sc *sc)
sc->hw.back = &sc->osdep;
- /* Disable adminq interrupts */
- ixlv_disable_adminq_irq(&sc->hw);
-
/*
- ** Now setup MSI/X, it will return
- ** us the number of supported vectors
+ ** Explicitly set the guest PCI BUSMASTER capability, and
+ ** rewrite the ENABLE bit in the MSI-X control register at
+ ** this point so that the host initializes us successfully.
+ **
+ ** This must be set before accessing any registers.
*/
- sc->msix = ixlv_init_msix(sc);
+ {
+ u16 pci_cmd_word;
+ int msix_ctrl;
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+ }
- /* We fail without MSIX support */
- if (sc->msix == 0)
- return (ENXIO);
+ /* Disable adminq interrupts (just in case) */
+ ixlv_disable_adminq_irq(&sc->hw);
return (0);
}
@@ -1330,8 +1360,10 @@ ixlv_free_pci_resources(struct ixlv_sc *sc)
bus_teardown_intr(dev, que->res, que->tag);
que->tag = NULL;
}
- if (que->res != NULL)
+ if (que->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ que->res = NULL;
+ }
}
early:
@@ -1340,8 +1372,10 @@ early:
bus_teardown_intr(dev, sc->res, sc->tag);
sc->tag = NULL;
}
- if (sc->res != NULL)
+ if (sc->res != NULL) {
bus_release_resource(dev, SYS_RES_IRQ, 1, sc->res);
+ sc->res = NULL;
+ }
pci_release_msi(dev);
@@ -1352,8 +1386,6 @@ early:
if (sc->pci_mem != NULL)
bus_release_resource(dev, SYS_RES_MEMORY,
PCIR_BAR(0), sc->pci_mem);
-
- return;
}
/*
@@ -1418,7 +1450,6 @@ ixlv_assign_msix(struct ixlv_sc *sc)
#endif
bus_bind_intr(dev, que->res, cpu_id);
que->msix = vector;
- vsi->que_mask |= (u64)(1 << que->msix);
TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
TASK_INIT(&que->task, 0, ixlv_handle_que, que);
que->tq = taskqueue_create_fast("ixlv_que", M_NOWAIT,
@@ -1454,7 +1485,7 @@ ixlv_reset(struct ixlv_sc *sc)
if (sc->init_state != IXLV_RESET_PENDING)
ixlv_request_reset(sc);
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
error = ixlv_reset_complete(hw);
if (error) {
device_printf(dev, "%s: VF reset failed\n",
@@ -1484,6 +1515,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
{
u32 reg;
+ /* Wait up to ~10 seconds */
for (int i = 0; i < 100; i++) {
reg = rd32(hw, I40E_VFGEN_RSTAT) &
I40E_VFGEN_RSTAT_VFR_STATE_MASK;
@@ -1491,7 +1523,7 @@ ixlv_reset_complete(struct i40e_hw *hw)
if ((reg == I40E_VFR_VFACTIVE) ||
(reg == I40E_VFR_COMPLETED))
return (0);
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
}
return (EBUSY);
@@ -1522,7 +1554,7 @@ ixlv_setup_interface(device_t dev, struct ixlv_sc *sc)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
ifp->if_mtu = ETHERMTU;
- ifp->if_baudrate = 4000000000; // ??
+ ifp->if_baudrate = IF_Gbps(40);
ifp->if_init = ixlv_init;
ifp->if_softc = vsi;
ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
@@ -1686,7 +1718,7 @@ ixlv_setup_queues(struct ixlv_sc *sc)
rxr->base = (union i40e_rx_desc *)rxr->dma.va;
bzero((void *)rxr->base, rsize);
- /* Allocate receive soft structs for the ring*/
+ /* Allocate receive soft structs for the ring */
if (ixl_allocate_rx_data(que)) {
device_printf(dev,
"Critical Failure setting up receive structs\n");
@@ -1896,18 +1928,48 @@ ixlv_enable_queue_irq(struct i40e_hw *hw, int id)
u32 reg;
reg = I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK;
+ I40E_VFINT_DYN_CTLN1_CLEARPBA_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK;
wr32(hw, I40E_VFINT_DYN_CTLN1(id), reg);
}
static void
ixlv_disable_queue_irq(struct i40e_hw *hw, int id)
{
- wr32(hw, I40E_VFINT_DYN_CTLN1(id), 0);
+ wr32(hw, I40E_VFINT_DYN_CTLN1(id),
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
rd32(hw, I40E_VFGEN_RSTAT);
return;
}
+/*
+ * Get initial ITR values from tunable values.
+ */
+static void
+ixlv_configure_itr(struct ixlv_sc *sc)
+{
+ struct i40e_hw *hw = &sc->hw;
+ struct ixl_vsi *vsi = &sc->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->rx_itr_setting = ixlv_rx_itr;
+ vsi->tx_itr_setting = ixlv_tx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_VFINT_ITRN1(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+
+ wr32(hw, I40E_VFINT_ITRN1(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
/*
** Provide an update to the queue RX
@@ -2251,7 +2313,7 @@ ixlv_add_multi(struct ixl_vsi *vsi)
}
if_maddr_runlock(ifp);
- // TODO: Remove -- cannot set promiscuous mode in a VF
+ /* TODO: Remove -- cannot set promiscuous mode in a VF */
if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
/* delete all multicast filters */
ixlv_init_multi(vsi);
@@ -2381,7 +2443,8 @@ ixlv_local_timer(void *arg)
** Check status on the queues for a hang
*/
mask = (I40E_VFINT_DYN_CTLN1_INTENA_MASK |
- I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK);
+ I40E_VFINT_DYN_CTLN1_SWINT_TRIG_MASK |
+ I40E_VFINT_DYN_CTLN1_ITR_INDX_MASK);
for (int i = 0; i < vsi->num_queues; i++,que++) {
/* Any queues with outstanding work get a sw irq */
@@ -2522,28 +2585,17 @@ ixlv_free_queues(struct ixl_vsi *vsi)
free(vsi->queues, M_DEVBUF);
}
-
-/*
-** ixlv_config_rss - setup RSS
-**
-** RSS keys and table are cleared on VF reset.
-*/
static void
-ixlv_config_rss(struct ixlv_sc *sc)
+ixlv_config_rss_reg(struct ixlv_sc *sc)
{
struct i40e_hw *hw = &sc->hw;
struct ixl_vsi *vsi = &sc->vsi;
u32 lut = 0;
u64 set_hena = 0, hena;
int i, j, que_id;
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
#ifdef RSS
u32 rss_hash_config;
- u32 rss_seed[IXL_KEYSZ];
-#else
- u32 rss_seed[IXL_KEYSZ] = {0x41b01687,
- 0x183cfd8c, 0xce880440, 0x580cbc3c,
- 0x35897377, 0x328b25e1, 0x4fa98922,
- 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1};
#endif
/* Don't set up RSS if using a single queue */
@@ -2557,9 +2609,12 @@ ixlv_config_rss(struct ixlv_sc *sc)
#ifdef RSS
/* Fetch the configured RSS key */
rss_getkey((uint8_t *) &rss_seed);
+#else
+ ixl_get_default_rss_key(rss_seed);
#endif
+
/* Fill out hash function seed */
- for (i = 0; i <= IXL_KEYSZ; i++)
+ for (i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
wr32(hw, I40E_VFQF_HKEY(i), rss_seed[i]);
/* Enable PCTYPES for RSS: */
@@ -2580,18 +2635,7 @@ ixlv_config_rss(struct ixlv_sc *sc)
if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
#else
- set_hena =
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV4) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) |
- ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) |
- ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6) |
- ((u64)1 << I40E_FILTER_PCTYPE_L2_PAYLOAD);
+ set_hena = IXL_DEFAULT_RSS_HENA;
#endif
hena = (u64)rd32(hw, I40E_VFQF_HENA(0)) |
((u64)rd32(hw, I40E_VFQF_HENA(1)) << 32);
@@ -2599,9 +2643,8 @@ ixlv_config_rss(struct ixlv_sc *sc)
wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));
- // TODO: Fix -- only 3,7,11,15 are filled out, instead of all 16 registers
/* Populate the LUT with max no. of queues in round robin fashion */
- for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++, j++) {
+ for (i = 0, j = 0; i < IXL_RSS_VSI_LUT_SIZE; i++, j++) {
if (j == vsi->num_queues)
j = 0;
#ifdef RSS
@@ -2616,16 +2659,46 @@ ixlv_config_rss(struct ixlv_sc *sc)
que_id = j;
#endif
/* lut = 4-byte sliding window of 4 lut entries */
- lut = (lut << 8) | (que_id & 0xF);
+ lut = (lut << 8) | (que_id & IXL_RSS_VF_LUT_ENTRY_MASK);
/* On i = 3, we have 4 entries in lut; write to the register */
if ((i & 3) == 3) {
- wr32(hw, I40E_VFQF_HLUT(i), lut);
+ wr32(hw, I40E_VFQF_HLUT(i >> 2), lut);
DDPRINTF(sc->dev, "HLUT(%2d): %#010x", i, lut);
}
}
ixl_flush(hw);
}
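/*
 * A standalone sketch of the HLUT packing above: four one-byte LUT
 * entries are shifted into a 32-bit word and flushed every fourth
 * iteration to register index i >> 2, so 64 entries fill registers
 * 0..15 (the old code wrote HLUT(i) and touched only 3, 7, 11, 15).
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t lut = 0;
	int num_queues = 3;		/* example queue count */
	int i, j;

	for (i = 0, j = 0; i < 64; i++, j++) {
		if (j == num_queues)
			j = 0;
		lut = (lut << 8) | (j & 0xF);	/* 4-byte sliding window */
		if ((i & 3) == 3)
			printf("HLUT[%2d] = %#010x\n", i >> 2, lut);
	}
	return (0);
}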
+static void
+ixlv_config_rss_pf(struct ixlv_sc *sc)
+{
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_key_cmd,
+ IXLV_FLAG_AQ_CONFIG_RSS_KEY, ixl_init_cmd_complete, sc);
+
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->set_rss_hena_cmd,
+ IXLV_FLAG_AQ_SET_RSS_HENA, ixl_init_cmd_complete, sc);
+
+ ixl_vc_enqueue(&sc->vc_mgr, &sc->config_rss_lut_cmd,
+ IXLV_FLAG_AQ_CONFIG_RSS_LUT, ixl_init_cmd_complete, sc);
+}
+
+/*
+** ixlv_config_rss - setup RSS
+**
+** RSS keys and table are cleared on VF reset.
+*/
+static void
+ixlv_config_rss(struct ixlv_sc *sc)
+{
+ if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG) {
+ DDPRINTF(sc->dev, "Setting up RSS using VF registers...");
+ ixlv_config_rss_reg(sc);
+ } else if (sc->vf_res->vf_offload_flags & I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF) {
+ DDPRINTF(sc->dev, "Setting up RSS using messages to PF...");
+ ixlv_config_rss_pf(sc);
+ } else
+ device_printf(sc->dev, "VF does not support RSS capability sent by PF.\n");
+}
/*
** This routine refreshes vlan filters, called by init
@@ -2868,8 +2941,8 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tso_tx",
CTLFLAG_RD, &(queues[q].tso),
"TSO");
- SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dma_setup",
- CTLFLAG_RD, &(queues[q].tx_dma_setup),
+ SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
+ CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
"Driver tx dma failure in xmit");
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
CTLFLAG_RD, &(txr->no_desc),
@@ -2886,7 +2959,14 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
SYSCTL_ADD_QUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
CTLFLAG_RD, &(rxr->rx_bytes),
"Queue Bytes Received");
-
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+
+#ifdef IXL_DEBUG
/* Examine queue state */
SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_head",
CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
@@ -2898,6 +2978,7 @@ ixlv_add_sysctls(struct ixlv_sc *sc)
sizeof(struct ixl_queue),
ixlv_sysctl_qrx_tail_handler, "IU",
"Queue Receive Descriptor Tail");
+#endif
}
}
@@ -2932,6 +3013,7 @@ ixlv_free_filters(struct ixlv_sc *sc)
return;
}
+#ifdef IXL_DEBUG
/**
* ixlv_sysctl_qtx_tail_handler
* Retrieves I40E_QTX_TAIL1 value from hardware
@@ -2975,4 +3057,5 @@ ixlv_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
return error;
return (0);
}
+#endif
diff --git a/sys/dev/ixl/ixl.h b/sys/dev/ixl/ixl.h
index 68fd3f18c4b6..1a92edbc15e7 100644
--- a/sys/dev/ixl/ixl.h
+++ b/sys/dev/ixl/ixl.h
@@ -36,6 +36,9 @@
#ifndef _IXL_H_
#define _IXL_H_
+#include "opt_inet.h"
+#include "opt_inet6.h"
+#include "opt_rss.h"
#include <sys/param.h>
#include <sys/systm.h>
@@ -90,29 +93,23 @@
#include <sys/smp.h>
#include <sys/sbuf.h>
#include <machine/smp.h>
+#include <machine/stdarg.h>
-#ifdef PCI_IOV
-#include <sys/nv.h>
-#include <sys/iov_schema.h>
-#include <dev/pci/pci_iov.h>
+#ifdef RSS
+#include <net/rss_config.h>
+#include <netinet/in_rss.h>
#endif
#include "i40e_type.h"
#include "i40e_prototype.h"
-#if defined(IXL_DEBUG) || defined(IXL_DEBUG_SYSCTL)
#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
(mac_addr)[4], (mac_addr)[5]
#define ON_OFF_STR(is_set) ((is_set) ? "On" : "Off")
-#endif /* IXL_DEBUG || IXL_DEBUG_SYSCTL */
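/*
 * A hypothetical usage of the MAC_FORMAT helpers above, now available
 * unconditionally; the sample address is made up.
 */
#include <stdio.h>

#define MAC_FORMAT "%02x:%02x:%02x:%02x:%02x:%02x"
#define MAC_FORMAT_ARGS(mac_addr) \
	(mac_addr)[0], (mac_addr)[1], (mac_addr)[2], (mac_addr)[3], \
	(mac_addr)[4], (mac_addr)[5]

int
main(void)
{
	unsigned char mac[6] = { 0x00, 0x25, 0x90, 0xab, 0xcd, 0xef };

	printf("MAC: " MAC_FORMAT "\n", MAC_FORMAT_ARGS(mac));
	return (0);
}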
#ifdef IXL_DEBUG
-/* Enable debug sysctls */
-#ifndef IXL_DEBUG_SYSCTL
-#define IXL_DEBUG_SYSCTL 1
-#endif
#define _DBG_PRINTF(S, ...) printf("%s: " S "\n", __func__, ##__VA_ARGS__)
#define _DEV_DBG_PRINTF(dev, S, ...) device_printf(dev, "%s: " S "\n", __func__, ##__VA_ARGS__)
@@ -157,6 +154,26 @@
#define HW_DEBUGOUT(...)
#endif /* IXL_DEBUG */
+enum ixl_dbg_mask {
+ IXL_DBG_INFO = 0x00000001,
+ IXL_DBG_EN_DIS = 0x00000002,
+ IXL_DBG_AQ = 0x00000004,
+ IXL_DBG_NVMUPD = 0x00000008,
+
+ IXL_DBG_IOCTL_KNOWN = 0x00000010,
+ IXL_DBG_IOCTL_UNKNOWN = 0x00000020,
+ IXL_DBG_IOCTL_ALL = 0x00000030,
+
+ I40E_DEBUG_RSS = 0x00000100,
+
+ IXL_DBG_IOV = 0x00001000,
+ IXL_DBG_IOV_VC = 0x00002000,
+
+ IXL_DBG_SWITCH_INFO = 0x00010000,
+
+ IXL_DBG_ALL = 0xFFFFFFFF
+};
+
/* Tunables */
/*
@@ -167,27 +184,28 @@
* Tx descriptors are always 16 bytes, but Rx descriptors can be 32 bytes.
* The driver currently always uses 32 byte Rx descriptors.
*/
-#define DEFAULT_RING 1024
-#define PERFORM_RING 2048
-#define MAX_RING 4096
-#define MIN_RING 32
+#define DEFAULT_RING 1024
+#define IXL_MAX_RING 8160
+#define IXL_MIN_RING 32
+#define IXL_RING_INCREMENT 32
+
+#define IXL_AQ_LEN 256
+#define IXL_AQ_LEN_MAX 1024
/*
** Default number of entries in Tx queue buf_ring.
*/
-#define SMALL_TXBRSZ 4096
-/* This may require mbuf cluster tuning */
-#define DEFAULT_TXBRSZ (SMALL_TXBRSZ * SMALL_TXBRSZ)
+#define DEFAULT_TXBRSZ 4096
/* Alignment for rings */
-#define DBA_ALIGN 128
+#define DBA_ALIGN 128
/*
* This is the max watchdog interval, i.e. the time that can
* pass between any two TX clean operations; such cleaning only
* happens when the TX hardware is functioning.
*/
-#define IXL_WATCHDOG (10 * hz)
+#define IXL_WATCHDOG (10 * hz)
/*
* These parameters control when the driver calls the routine to reclaim
@@ -196,11 +214,6 @@
#define IXL_TX_CLEANUP_THRESHOLD (que->num_desc / 8)
#define IXL_TX_OP_THRESHOLD (que->num_desc / 32)
-/* Flow control constants */
-#define IXL_FC_PAUSE 0xFFFF
-#define IXL_FC_HI 0x20000
-#define IXL_FC_LO 0x10000
-
#define MAX_MULTICAST_ADDR 128
#define IXL_BAR 3
@@ -208,10 +221,6 @@
#define IXL_TSO_SIZE 65535
#define IXL_AQ_BUF_SZ ((u32) 4096)
#define IXL_RX_HDR 128
-/* Controls the length of the Admin Queue */
-#define IXL_AQ_LEN 256
-#define IXL_AQ_LEN_MAX 1024
-#define IXL_AQ_BUFSZ 4096
#define IXL_RX_LIMIT 512
#define IXL_RX_ITR 0
#define IXL_TX_ITR 1
@@ -219,25 +228,29 @@
#define IXL_QUEUE_EOL 0x7FF
#define IXL_MAX_FRAME 9728
#define IXL_MAX_TX_SEGS 8
-#define IXL_MAX_TSO_SEGS 66
+#define IXL_MAX_TSO_SEGS 128
#define IXL_SPARSE_CHAIN 6
#define IXL_QUEUE_HUNG 0x80000000
-#define IXL_KEYSZ 10
+
+#define IXL_RSS_KEY_SIZE_REG 13
+#define IXL_RSS_KEY_SIZE (IXL_RSS_KEY_SIZE_REG * 4)
+#define IXL_RSS_VSI_LUT_SIZE 64 /* X722 -> VSI, X710 -> VF */
+#define IXL_RSS_VSI_LUT_ENTRY_MASK 0x3F
+#define IXL_RSS_VF_LUT_ENTRY_MASK 0xF
#define IXL_VF_MAX_BUFFER 0x3F80
#define IXL_VF_MAX_HDR_BUFFER 0x840
#define IXL_VF_MAX_FRAME 0x3FFF
-/* ERJ: hardware can support ~1.5k filters between all functions */
-#define IXL_MAX_FILTERS 256
-#define IXL_MAX_TX_BUSY 10
+/* ERJ: hardware can support ~2k (SW5+) filters between all functions */
+#define IXL_MAX_FILTERS 256
+#define IXL_MAX_TX_BUSY 10
#define IXL_NVM_VERSION_LO_SHIFT 0
#define IXL_NVM_VERSION_LO_MASK (0xff << IXL_NVM_VERSION_LO_SHIFT)
#define IXL_NVM_VERSION_HI_SHIFT 12
#define IXL_NVM_VERSION_HI_MASK (0xf << IXL_NVM_VERSION_HI_SHIFT)
-
/*
* Interrupt Moderation parameters
*/
@@ -298,6 +311,19 @@
#define IXL_END_OF_INTR_LNKLST 0x7FF
+#define IXL_DEFAULT_RSS_HENA (\
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
+ BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))
+
#define IXL_TX_LOCK(_sc) mtx_lock(&(_sc)->mtx)
#define IXL_TX_UNLOCK(_sc) mtx_unlock(&(_sc)->mtx)
#define IXL_TX_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->mtx)
@@ -337,11 +363,6 @@
#define IXL_SET_NOPROTO(vsi, count) (vsi)->noproto = (count)
#endif
-/* Pre-10.2 media type compatibility */
-#if __FreeBSD_version < 1002000
-#define IFM_OTHER IFM_UNKNOWN
-#endif
-
/*
*****************************************************************************
* vendor_info_array
@@ -480,45 +501,40 @@ struct ixl_queue {
u64 mbuf_defrag_failed;
u64 mbuf_hdr_failed;
u64 mbuf_pkt_failed;
- u64 tx_map_avail;
- u64 tx_dma_setup;
+ u64 tx_dmamap_failed;
u64 dropped_pkts;
};
/*
-** Virtual Station interface:
-** there would be one of these per traffic class/type
-** for now just one, and its embedded in the pf
+** Virtual Station Interface
*/
SLIST_HEAD(ixl_ftl_head, ixl_mac_filter);
struct ixl_vsi {
void *back;
struct ifnet *ifp;
- struct device *dev;
+ device_t dev;
struct i40e_hw *hw;
struct ifmedia media;
enum i40e_vsi_type type;
- u64 que_mask;
int id;
- u16 vsi_num;
- u16 msix_base; /* station base MSIX vector */
- u16 first_queue;
u16 num_queues;
u32 rx_itr_setting;
u32 tx_itr_setting;
+ u16 max_frame_size;
+
struct ixl_queue *queues; /* head of queues */
+
+ u16 vsi_num;
bool link_active;
u16 seid;
u16 uplink_seid;
u16 downlink_seid;
- u16 max_frame_size;
- u16 rss_table_size;
- u16 rss_size;
/* MAC/VLAN Filter list */
struct ixl_ftl_head ftl;
u16 num_macs;
+ /* Contains readylist & stat counter id */
struct i40e_aqc_vsi_properties_data info;
eventhandler_tag vlan_attach;
@@ -565,7 +581,7 @@ ixl_rx_unrefreshed(struct ixl_queue *que)
else
return ((que->num_desc + rxr->next_check) -
rxr->next_refresh - 1);
-}
+}
/*
** Find the next available unused filter
@@ -601,6 +617,28 @@ cmp_etheraddr(const u8 *ea1, const u8 *ea2)
}
/*
+ * Return the smallest power of two >= n, unsigned
+ *
+ * Public domain, from Bit Twiddling Hacks
+ */
+static inline u32
+next_power_of_two(u32 n)
+{
+ n--;
+ n |= n >> 1;
+ n |= n >> 2;
+ n |= n >> 4;
+ n |= n >> 8;
+ n |= n >> 16;
+ n++;
+
+ /* Next power of two > 0 is 1 */
+ n += (n == 0);
+
+ return (n);
+}
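/*
 * A standalone check of the bit-smearing trick above: after the
 * decrement and shifts, every bit below the highest set bit is 1, so
 * the increment lands on the smallest power of two >= the input
 * (with 0 mapped to 1 by the final correction).
 */
#include <assert.h>
#include <stdint.h>

static uint32_t		/* uint32_t stands in for the driver's u32 */
next_pow2(uint32_t n)
{
	n--;
	n |= n >> 1;  n |= n >> 2;  n |= n >> 4;
	n |= n >> 8;  n |= n >> 16;
	n++;
	n += (n == 0);	/* next power of two > 0 is 1 */
	return (n);
}

int
main(void)
{
	assert(next_pow2(0) == 1);
	assert(next_pow2(1) == 1);
	assert(next_pow2(5) == 8);
	assert(next_pow2(64) == 64);
	assert(next_pow2(4097) == 8192);
	return (0);
}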
+
+/*
* Info for stats sysctls
*/
struct ixl_sysctl_info {
@@ -609,7 +647,8 @@ struct ixl_sysctl_info {
char *description;
};
-extern int ixl_atr_rate;
+static uint8_t ixl_bcast_addr[ETHER_ADDR_LEN] =
+ {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/*********************************************************************
* TXRX Function prototypes
@@ -620,18 +659,18 @@ void ixl_init_tx_ring(struct ixl_queue *);
int ixl_init_rx_ring(struct ixl_queue *);
bool ixl_rxeof(struct ixl_queue *, int);
bool ixl_txeof(struct ixl_queue *);
+void ixl_free_que_tx(struct ixl_queue *);
+void ixl_free_que_rx(struct ixl_queue *);
+
int ixl_mq_start(struct ifnet *, struct mbuf *);
int ixl_mq_start_locked(struct ifnet *, struct tx_ring *);
void ixl_deferred_mq_start(void *, int);
-void ixl_qflush(struct ifnet *);
void ixl_free_vsi(struct ixl_vsi *);
-void ixl_free_que_tx(struct ixl_queue *);
-void ixl_free_que_rx(struct ixl_queue *);
-#ifdef IXL_FDIR
-void ixl_atr(struct ixl_queue *, struct tcphdr *, int);
-#endif
+void ixl_qflush(struct ifnet *);
+
+/* Common function prototypes between PF/VF driver */
#if __FreeBSD_version >= 1100000
uint64_t ixl_get_counter(if_t ifp, ift_counter cnt);
#endif
-
+void ixl_get_default_rss_key(u32 *);
#endif /* _IXL_H_ */
diff --git a/sys/dev/ixl/ixl_pf.h b/sys/dev/ixl/ixl_pf.h
index 65915bea1544..107e30fded5d 100644
--- a/sys/dev/ixl/ixl_pf.h
+++ b/sys/dev/ixl/ixl_pf.h
@@ -36,6 +36,9 @@
#ifndef _IXL_PF_H_
#define _IXL_PF_H_
+#include "ixl.h"
+#include "ixl_pf_qmgr.h"
+
#define VF_FLAG_ENABLED 0x01
#define VF_FLAG_SET_MAC_CAP 0x02
#define VF_FLAG_VLAN_CAP 0x04
@@ -52,6 +55,7 @@ struct ixl_vf {
uint16_t vf_num;
uint32_t version;
+ struct ixl_pf_qtag qtag;
struct sysctl_ctx_list ctx;
};
@@ -59,7 +63,8 @@ struct ixl_vf {
struct ixl_pf {
struct i40e_hw hw;
struct i40e_osdep osdep;
- struct device *dev;
+ device_t dev;
+ struct ixl_vsi vsi;
struct resource *pci_mem;
struct resource *msix_mem;
@@ -77,6 +82,19 @@ struct ixl_pf {
int if_flags;
int state;
+ struct ixl_pf_qmgr qmgr;
+ struct ixl_pf_qtag qtag;
+
+ /* Tunable values */
+ bool enable_msix;
+ int max_queues;
+ int ringsz;
+ bool enable_tx_fc_filter;
+ int dynamic_rx_itr;
+ int dynamic_tx_itr;
+ int tx_itr;
+ int rx_itr;
+
struct mtx pf_mtx;
u32 qbase;
@@ -87,17 +105,8 @@ struct ixl_pf {
bool link_up;
u32 link_speed;
int advertised_speed;
- int fc; /* local flow ctrl setting */
-
- /*
- ** Network interfaces
- ** These are the traffic class holders, and
- ** will have a stack interface and queues
- ** associated with them.
- ** NOTE: The PF has only a single interface,
- ** so it is embedded in the PF struct.
- */
- struct ixl_vsi vsi;
+ int fc; /* link flow ctrl setting */
+ enum ixl_dbg_mask dbg_mask;
/* Misc stats maintained by the driver */
u64 watchdog_events;
@@ -108,6 +117,7 @@ struct ixl_pf {
struct i40e_hw_port_stats stats_offsets;
bool stat_offsets_loaded;
+ /* SR-IOV */
struct ixl_vf *vfs;
int num_vfs;
uint16_t veb_seid;
@@ -149,42 +159,18 @@ struct ixl_pf {
"\nExecutes a \"Get Link Status\" command on the Admin Queue, and displays" \
" the response." \
-/*** Functions / Macros ***/
+static char *ixl_fc_string[6] = {
+ "None",
+ "Rx",
+ "Tx",
+ "Full",
+ "Priority",
+ "Default"
+};
-/*
-** Put the NVM, EEtrackID, and OEM version information into a string
-*/
-static void
-ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
-{
- u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
- u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
- u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
-
- sbuf_printf(buf,
- "nvm %x.%02x etid %08x oem %d.%d.%d",
- (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
- IXL_NVM_VERSION_HI_SHIFT,
- (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
- IXL_NVM_VERSION_LO_SHIFT,
- hw->nvm.eetrack,
- oem_ver, oem_build, oem_patch);
-}
-
-static void
-ixl_print_nvm_version(struct ixl_pf *pf)
-{
- struct i40e_hw *hw = &pf->hw;
- device_t dev = pf->dev;
- struct sbuf *sbuf;
-
- sbuf = sbuf_new_auto();
- ixl_nvm_version_str(hw, sbuf);
- sbuf_finish(sbuf);
- device_printf(dev, "%s\n", sbuf_data(sbuf));
- sbuf_delete(sbuf);
-}
+static MALLOC_DEFINE(M_IXL, "ixl", "ixl driver allocations");
+/*** Functions / Macros ***/
#define I40E_VC_DEBUG(pf, level, ...) \
do { \
if ((pf)->vc_debug_lvl >= (level)) \
@@ -201,4 +187,136 @@ ixl_print_nvm_version(struct ixl_pf *pf)
#define IXL_PF_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->pf_mtx)
#define IXL_PF_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->pf_mtx, MA_OWNED)
+/* For stats sysctl naming */
+#define QUEUE_NAME_LEN 32
+
+/*
+ * PF-only function declarations
+ */
+
+void ixl_set_busmaster(device_t);
+int ixl_setup_interface(device_t, struct ixl_vsi *);
+void ixl_print_nvm_cmd(device_t, struct i40e_nvm_access *);
+
+void ixl_handle_que(void *context, int pending);
+
+void ixl_init(void *);
+void ixl_local_timer(void *);
+void ixl_register_vlan(void *, struct ifnet *, u16);
+void ixl_unregister_vlan(void *, struct ifnet *, u16);
+void ixl_intr(void *);
+void ixl_msix_que(void *);
+void ixl_msix_adminq(void *);
+void ixl_do_adminq(void *, int);
+
+int ixl_res_alloc_cmp(const void *, const void *);
+char * ixl_switch_res_type_string(u8);
+char * ixl_switch_element_string(struct sbuf *,
+ struct i40e_aqc_switch_config_element_resp *);
+void ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *, struct i40e_hw_port_stats *);
+void ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *,
+ struct sysctl_oid_list *,
+ struct i40e_eth_stats *);
+
+void ixl_media_status(struct ifnet *, struct ifmediareq *);
+int ixl_media_change(struct ifnet *);
+int ixl_ioctl(struct ifnet *, u_long, caddr_t);
+
+void ixl_enable_adminq(struct i40e_hw *);
+void ixl_get_bus_info(struct i40e_hw *, device_t);
+void ixl_disable_adminq(struct i40e_hw *);
+void ixl_enable_queue(struct i40e_hw *, int);
+void ixl_disable_queue(struct i40e_hw *, int);
+void ixl_enable_legacy(struct i40e_hw *);
+void ixl_disable_legacy(struct i40e_hw *);
+void ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf);
+void ixl_stat_update48(struct i40e_hw *, u32, u32, bool,
+ u64 *, u64 *);
+void ixl_stat_update32(struct i40e_hw *, u32, bool,
+ u64 *, u64 *);
+
+void ixl_stop(struct ixl_pf *);
+void ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi, struct sysctl_ctx_list *ctx, const char *sysctl_name);
+int ixl_get_hw_capabilities(struct ixl_pf *);
+void ixl_update_link_status(struct ixl_pf *);
+int ixl_allocate_pci_resources(struct ixl_pf *);
+int ixl_setup_stations(struct ixl_pf *);
+int ixl_switch_config(struct ixl_pf *);
+void ixl_stop_locked(struct ixl_pf *);
+int ixl_teardown_hw_structs(struct ixl_pf *);
+int ixl_reset(struct ixl_pf *);
+void ixl_init_locked(struct ixl_pf *);
+void ixl_set_rss_key(struct ixl_pf *);
+void ixl_set_rss_pctypes(struct ixl_pf *);
+void ixl_set_rss_hlut(struct ixl_pf *);
+int ixl_setup_adminq_msix(struct ixl_pf *);
+int ixl_setup_adminq_tq(struct ixl_pf *);
+int ixl_teardown_adminq_msix(struct ixl_pf *);
+void ixl_configure_intr0_msix(struct ixl_pf *);
+void ixl_configure_queue_intr_msix(struct ixl_pf *);
+void ixl_free_adminq_tq(struct ixl_pf *);
+int ixl_assign_vsi_legacy(struct ixl_pf *);
+int ixl_init_msix(struct ixl_pf *);
+void ixl_configure_itr(struct ixl_pf *);
+void ixl_configure_legacy(struct ixl_pf *);
+void ixl_free_pci_resources(struct ixl_pf *);
+void ixl_link_event(struct ixl_pf *, struct i40e_arq_event_info *);
+void ixl_config_rss(struct ixl_pf *);
+int ixl_set_advertised_speeds(struct ixl_pf *, int);
+void ixl_get_initial_advertised_speeds(struct ixl_pf *);
+void ixl_print_nvm_version(struct ixl_pf *pf);
+void ixl_add_device_sysctls(struct ixl_pf *);
+void ixl_handle_mdd_event(struct ixl_pf *);
+void ixl_add_hw_stats(struct ixl_pf *);
+void ixl_update_stats_counters(struct ixl_pf *);
+void ixl_pf_reset_stats(struct ixl_pf *);
+void ixl_dbg(struct ixl_pf *, enum ixl_dbg_mask, char *, ...);
+
+int ixl_handle_nvmupd_cmd(struct ixl_pf *, struct ifdrv *);
+void ixl_handle_empr_reset(struct ixl_pf *);
+int ixl_rebuild_hw_structs_after_reset(struct ixl_pf *);
+
+void ixl_set_queue_rx_itr(struct ixl_queue *);
+void ixl_set_queue_tx_itr(struct ixl_queue *);
+
+void ixl_add_filter(struct ixl_vsi *, u8 *, s16 vlan);
+void ixl_del_filter(struct ixl_vsi *, u8 *, s16 vlan);
+void ixl_reconfigure_filters(struct ixl_vsi *vsi);
+
+int ixl_disable_rings(struct ixl_vsi *);
+int ixl_disable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_disable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
+
+int ixl_enable_rings(struct ixl_vsi *);
+int ixl_enable_tx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_enable_rx_ring(struct ixl_pf *, struct ixl_pf_qtag *, u16);
+int ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *, u16);
+
+void ixl_update_eth_stats(struct ixl_vsi *);
+void ixl_disable_intr(struct ixl_vsi *);
+void ixl_cap_txcsum_tso(struct ixl_vsi *, struct ifnet *, int);
+int ixl_initialize_vsi(struct ixl_vsi *);
+void ixl_add_ifmedia(struct ixl_vsi *, u32);
+int ixl_setup_queue_msix(struct ixl_vsi *);
+int ixl_setup_queue_tqs(struct ixl_vsi *);
+int ixl_teardown_queue_msix(struct ixl_vsi *);
+void ixl_free_queue_tqs(struct ixl_vsi *);
+void ixl_enable_intr(struct ixl_vsi *);
+void ixl_disable_rings_intr(struct ixl_vsi *);
+void ixl_set_promisc(struct ixl_vsi *);
+void ixl_add_multi(struct ixl_vsi *);
+void ixl_del_multi(struct ixl_vsi *);
+void ixl_setup_vlan_filters(struct ixl_vsi *);
+void ixl_init_filters(struct ixl_vsi *);
+void ixl_add_hw_filters(struct ixl_vsi *, int, int);
+void ixl_del_hw_filters(struct ixl_vsi *, int);
+struct ixl_mac_filter *
+ ixl_find_filter(struct ixl_vsi *, u8 *, s16);
+void ixl_add_mc_filter(struct ixl_vsi *, u8 *);
+void ixl_free_mac_filters(struct ixl_vsi *vsi);
+void ixl_update_vsi_stats(struct ixl_vsi *);
+void ixl_vsi_reset_stats(struct ixl_vsi *);
+
#endif /* _IXL_PF_H_ */
diff --git a/sys/dev/ixl/ixl_pf_iov.c b/sys/dev/ixl/ixl_pf_iov.c
new file mode 100644
index 000000000000..a8c8b29cc605
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_iov.c
@@ -0,0 +1,1925 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+#include "ixl_pf_iov.h"
+
+/* Private functions */
+static void ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum, uint32_t val);
+static void ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg);
+static void ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg);
+
+static bool ixl_zero_mac(const uint8_t *addr);
+static bool ixl_bcast_mac(const uint8_t *addr);
+
+static const char * ixl_vc_opcode_str(uint16_t op);
+static int ixl_vc_opcode_level(uint16_t opcode);
+
+static int ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr);
+
+static int ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
+static int ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi);
+static void ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf);
+static int ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, void *msg, uint16_t len);
+static void ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op);
+static void ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op, enum i40e_status_code status, const char *file, int line);
+static void ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static int ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_txq_info *info);
+static int ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf, struct i40e_virtchnl_rxq_info *info);
+static void ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_set_qctl(struct ixl_pf *pf, const struct i40e_virtchnl_vector_map *vector, enum i40e_queue_type cur_type, uint16_t cur_queue,
+ enum i40e_queue_type *last_type, uint16_t *last_queue);
+static void ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf, const struct i40e_virtchnl_vector_map *vector);
+static void ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static enum i40e_status_code ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf);
+static void ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static void ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg, uint16_t msg_size);
+static int ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues);
+
+static int ixl_adminq_err_to_errno(enum i40e_admin_queue_err err);
+
+void
+ixl_initialize_sriov(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ nvlist_t *pf_schema, *vf_schema;
+ int iov_error;
+
+ /* SR-IOV is only supported when MSI-X is in use. */
+ if (pf->msix <= 1)
+ return;
+
+ pf_schema = pci_iov_schema_alloc_node();
+ vf_schema = pci_iov_schema_alloc_node();
+ pci_iov_schema_add_unicast_mac(vf_schema, "mac-addr", 0, NULL);
+ pci_iov_schema_add_bool(vf_schema, "mac-anti-spoof",
+ IOV_SCHEMA_HASDEFAULT, TRUE);
+ pci_iov_schema_add_bool(vf_schema, "allow-set-mac",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+ pci_iov_schema_add_bool(vf_schema, "allow-promisc",
+ IOV_SCHEMA_HASDEFAULT, FALSE);
+	pci_iov_schema_add_uint16(vf_schema, "num-queues",
+	    IOV_SCHEMA_HASDEFAULT,
+	    max(1, min(hw->func_caps.num_msix_vectors_vf - 1, IXLV_MAX_QUEUES)));
+
+ iov_error = pci_iov_attach(dev, pf_schema, vf_schema);
+ if (iov_error != 0) {
+ device_printf(dev,
+ "Failed to initialize SR-IOV (error=%d)\n",
+ iov_error);
+ } else
+ device_printf(dev, "SR-IOV ready\n");
+
+ pf->vc_debug_lvl = 1;
+}
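+
+/*
+ * A minimal sketch of how an administrator could exercise this schema
+ * from userland via iovctl(8); the device name and values here are
+ * hypothetical:
+ *
+ *	PF {
+ *		device : "ixl0";
+ *		num_vfs : 4;
+ *	}
+ *	DEFAULT {
+ *		num-queues : 4;
+ *		allow-set-mac : true;
+ *	}
+ *
+ * Keys omitted from the config fall back to the schema defaults
+ * registered above.
+ */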
+
+/*
+ * Allocate the VSI for a VF.
+ */
+static int
+ixl_vf_alloc_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ device_t dev;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct i40e_vsi_context vsi_ctx;
+ int i;
+ enum i40e_status_code code;
+
+ hw = &pf->hw;
+ vsi = &pf->vsi;
+ dev = pf->dev;
+
+ vsi_ctx.pf_num = hw->pf_id;
+ vsi_ctx.uplink_seid = pf->veb_seid;
+ vsi_ctx.connection_type = IXL_VSI_DATA_PORT;
+ vsi_ctx.vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ vsi_ctx.flags = I40E_AQ_VSI_TYPE_VF;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+ vsi_ctx.info.switch_id = htole16(0);
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_SECURITY_VALID);
+ vsi_ctx.info.sec_flags = 0;
+ if (vf->vf_flags & VF_FLAG_MAC_ANTI_SPOOF)
+ vsi_ctx.info.sec_flags |= I40E_AQ_VSI_SEC_FLAG_ENABLE_MAC_CHK;
+
+ vsi_ctx.info.valid_sections |= htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ vsi_ctx.info.valid_sections |=
+ htole16(I40E_AQ_VSI_PROP_QUEUE_MAP_VALID);
+ vsi_ctx.info.mapping_flags = htole16(I40E_AQ_VSI_QUE_MAP_NONCONTIG);
+
+ /* ERJ: Only scattered allocation is supported for VFs right now */
+ for (i = 0; i < vf->qtag.num_active; i++)
+ vsi_ctx.info.queue_mapping[i] = vf->qtag.qidx[i];
+ for (; i < nitems(vsi_ctx.info.queue_mapping); i++)
+ vsi_ctx.info.queue_mapping[i] = htole16(I40E_AQ_VSI_QUEUE_MASK);
+
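+	/*
+	 * The TC queue-count field below holds log2 of the queue count,
+	 * so bsrl() (index of the highest set bit) is the encoding; e.g.
+	 * 8 allocated queues encode as 3.  This assumes num_allocated is
+	 * a power of two.
+	 */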
+ vsi_ctx.info.tc_mapping[0] = htole16(
+ (0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) |
+ (bsrl(vf->qtag.num_allocated) << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT));
+
+ code = i40e_aq_add_vsi(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ vf->vsi.seid = vsi_ctx.seid;
+ vf->vsi.vsi_num = vsi_ctx.vsi_number;
+ // vf->vsi.first_queue = vf->qtag.qidx[0];
+ vf->vsi.num_queues = vf->qtag.num_active;
+
+ code = i40e_aq_get_vsi_params(hw, &vsi_ctx, NULL);
+ if (code != I40E_SUCCESS)
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+
+ code = i40e_aq_config_vsi_bw_limit(hw, vf->vsi.seid, 0, 0, NULL);
+ if (code != I40E_SUCCESS) {
+ device_printf(dev, "Failed to disable BW limit: %d\n",
+ ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ return (ixl_adminq_err_to_errno(hw->aq.asq_last_status));
+ }
+
+ memcpy(&vf->vsi.info, &vsi_ctx.info, sizeof(vf->vsi.info));
+ return (0);
+}
+
+static int
+ixl_vf_setup_vsi(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int error;
+
+ hw = &pf->hw;
+
+ error = ixl_vf_alloc_vsi(pf, vf);
+ if (error != 0)
+ return (error);
+
+ vf->vsi.hw_filters_add = 0;
+ vf->vsi.hw_filters_del = 0;
+ ixl_add_filter(&vf->vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+ ixl_reconfigure_filters(&vf->vsi);
+
+ return (0);
+}
+
+static void
+ixl_vf_map_vsi_queue(struct i40e_hw *hw, struct ixl_vf *vf, int qnum,
+ uint32_t val)
+{
+ uint32_t qtable;
+ int index, shift;
+
+ /*
+ * Two queues are mapped in a single register, so we have to do some
+ * gymnastics to convert the queue number into a register index and
+ * shift.
+ */
+ index = qnum / 2;
+ shift = (qnum % 2) * I40E_VSILAN_QTABLE_QINDEX_1_SHIFT;
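+	/*
+	 * For example, qnum 5 uses register index 2 (5 / 2) and lands in
+	 * the odd (QINDEX_1) slot, while qnum 4 shares the same register
+	 * in the even (QINDEX_0) slot.
+	 */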
+
+ qtable = i40e_read_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num));
+ qtable &= ~(I40E_VSILAN_QTABLE_QINDEX_0_MASK << shift);
+ qtable |= val << shift;
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(index, vf->vsi.vsi_num), qtable);
+}
+
+static void
+ixl_vf_map_queues(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t qtable;
+ int i;
+
+ hw = &pf->hw;
+
+ /*
+ * Contiguous mappings aren't actually supported by the hardware,
+ * so we have to use non-contiguous mappings.
+ */
+ i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->vsi.vsi_num),
+ I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
+
+ /* Enable LAN traffic on this VF */
+ wr32(hw, I40E_VPLAN_MAPENA(vf->vf_num),
+ I40E_VPLAN_MAPENA_TXRX_ENA_MASK);
+
+ /* Program index of each VF queue into PF queue space
+ * (This is only needed if QTABLE is enabled) */
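+	/* E.g. if VF queue 0 is backed by PF queue 12, this writes 12
+	 * into the VF's QTABLE entry 0. */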
+ for (i = 0; i < vf->vsi.num_queues; i++) {
+ qtable = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i) <<
+ I40E_VPLAN_QTABLE_QINDEX_SHIFT;
+
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num), qtable);
+ }
+ for (; i < IXL_MAX_VSI_QUEUES; i++)
+ wr32(hw, I40E_VPLAN_QTABLE(i, vf->vf_num),
+ I40E_VPLAN_QTABLE_QINDEX_MASK);
+
+ /* Map queues allocated to VF to its VSI;
+ * This mapping matches the VF-wide mapping since the VF
+ * is only given a single VSI */
+ for (i = 0; i < vf->vsi.num_queues; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i,
+ ixl_pf_qidx_from_vsi_qidx(&vf->qtag, i));
+
+ /* Set rest of VSI queues as unused. */
+ for (; i < IXL_MAX_VSI_QUEUES; i++)
+ ixl_vf_map_vsi_queue(hw, vf, i,
+ I40E_VSILAN_QTABLE_QINDEX_0_MASK);
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_vsi_release(struct ixl_pf *pf, struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw;
+
+ hw = &pf->hw;
+
+ if (vsi->seid == 0)
+ return;
+
+ i40e_aq_delete_element(hw, vsi->seid, NULL);
+}
+
+static void
+ixl_vf_disable_queue_intr(struct i40e_hw *hw, uint32_t vfint_reg)
+{
+
+ wr32(hw, vfint_reg, I40E_VFINT_DYN_CTLN_CLEARPBA_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_unregister_intr(struct i40e_hw *hw, uint32_t vpint_reg)
+{
+
+ wr32(hw, vpint_reg, I40E_VPINT_LNKLSTN_FIRSTQ_TYPE_MASK |
+ I40E_VPINT_LNKLSTN_FIRSTQ_INDX_MASK);
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_release_resources(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfint_reg, vpint_reg;
+ int i;
+
+ hw = &pf->hw;
+
+ ixl_vf_vsi_release(pf, &vf->vsi);
+
+ /* Index 0 has a special register. */
+ ixl_vf_disable_queue_intr(hw, I40E_VFINT_DYN_CTL0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+		vfint_reg = IXL_VFINT_DYN_CTLN_REG(hw, i, vf->vf_num);
+ ixl_vf_disable_queue_intr(hw, vfint_reg);
+ }
+
+ /* Index 0 has a special register. */
+ ixl_vf_unregister_intr(hw, I40E_VPINT_LNKLST0(vf->vf_num));
+
+ for (i = 1; i < hw->func_caps.num_msix_vectors_vf; i++) {
+ vpint_reg = IXL_VPINT_LNKLSTN_REG(hw, i, vf->vf_num);
+ ixl_vf_unregister_intr(hw, vpint_reg);
+ }
+
+ vf->vsi.num_queues = 0;
+}
+
+static int
+ixl_flush_pcie(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ int i;
+ uint16_t global_vf_num;
+ uint32_t ciad;
+
+ hw = &pf->hw;
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+
+ wr32(hw, I40E_PF_PCI_CIAA, IXL_PF_PCI_CIAA_VF_DEVICE_STATUS |
+ (global_vf_num << I40E_PF_PCI_CIAA_VF_NUM_SHIFT));
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ ciad = rd32(hw, I40E_PF_PCI_CIAD);
+ if ((ciad & IXL_PF_PCI_CIAD_VF_TRANS_PENDING_MASK) == 0)
+ return (0);
+ DELAY(1);
+ }
+
+ return (ETIMEDOUT);
+}
+
+static void
+ixl_reset_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrtrig;
+
+ hw = &pf->hw;
+
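+	/*
+	 * Trigger a software VF reset (VFSWR), then run the full reinit
+	 * sequence: poll for the reset to latch, tear down and rebuild
+	 * the VSI and queue mappings, and mark the VF active again.
+	 */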
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig |= I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+ ixl_flush(hw);
+
+ ixl_reinit_vf(pf, vf);
+}
+
+static void
+ixl_reinit_vf(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_hw *hw;
+ uint32_t vfrstat, vfrtrig;
+ int i, error;
+
+ hw = &pf->hw;
+
+ error = ixl_flush_pcie(pf, vf);
+ if (error != 0)
+ device_printf(pf->dev,
+ "Timed out waiting for PCIe activity to stop on VF-%d\n",
+ vf->vf_num);
+
+ for (i = 0; i < IXL_VF_RESET_TIMEOUT; i++) {
+ DELAY(10);
+
+ vfrstat = rd32(hw, I40E_VPGEN_VFRSTAT(vf->vf_num));
+ if (vfrstat & I40E_VPGEN_VFRSTAT_VFRD_MASK)
+ break;
+ }
+
+ if (i == IXL_VF_RESET_TIMEOUT)
+ device_printf(pf->dev, "VF %d failed to reset\n", vf->vf_num);
+
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_COMPLETED);
+
+ vfrtrig = rd32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num));
+ vfrtrig &= ~I40E_VPGEN_VFRTRIG_VFSWR_MASK;
+ wr32(hw, I40E_VPGEN_VFRTRIG(vf->vf_num), vfrtrig);
+
+ if (vf->vsi.seid != 0)
+ ixl_disable_rings(&vf->vsi);
+
+ ixl_vf_release_resources(pf, vf);
+ ixl_vf_setup_vsi(pf, vf);
+ ixl_vf_map_queues(pf, vf);
+
+ wr32(hw, I40E_VFGEN_RSTAT1(vf->vf_num), I40E_VFR_VFACTIVE);
+ ixl_flush(hw);
+}
+
+static const char *
+ixl_vc_opcode_str(uint16_t op)
+{
+
+ switch (op) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ return ("VERSION");
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ return ("RESET_VF");
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ return ("GET_VF_RESOURCES");
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ return ("CONFIG_TX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ return ("CONFIG_RX_QUEUE");
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ return ("CONFIG_VSI_QUEUES");
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ return ("CONFIG_IRQ_MAP");
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ return ("ENABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ return ("DISABLE_QUEUES");
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ return ("ADD_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ return ("DEL_ETHER_ADDRESS");
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ return ("ADD_VLAN");
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ return ("DEL_VLAN");
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ return ("CONFIG_PROMISCUOUS_MODE");
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return ("GET_STATS");
+ case I40E_VIRTCHNL_OP_FCOE:
+ return ("FCOE");
+ case I40E_VIRTCHNL_OP_EVENT:
+ return ("EVENT");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ return ("CONFIG_RSS_KEY");
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ return ("CONFIG_RSS_LUT");
+ case I40E_VIRTCHNL_OP_GET_RSS_HENA_CAPS:
+ return ("GET_RSS_HENA_CAPS");
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ return ("SET_RSS_HENA");
+ default:
+ return ("UNKNOWN");
+ }
+}
+
+static int
+ixl_vc_opcode_level(uint16_t opcode)
+{
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ return (10);
+ default:
+ return (5);
+ }
+}
+
+static void
+ixl_send_vf_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, void *msg, uint16_t len)
+{
+ struct i40e_hw *hw;
+ int global_vf_id;
+
+ hw = &pf->hw;
+ global_vf_id = hw->func_caps.vf_base_id + vf->vf_num;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(op),
+ "Sending msg (op=%s[%d], status=%d) to VF-%d\n",
+ ixl_vc_opcode_str(op), op, status, vf->vf_num);
+
+ i40e_aq_send_msg_to_vf(hw, global_vf_id, op, status, msg, len, NULL);
+}
+
+static void
+ixl_send_vf_ack(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op)
+{
+
+ ixl_send_vf_msg(pf, vf, op, I40E_SUCCESS, NULL, 0);
+}
+
+static void
+ixl_send_vf_nack_msg(struct ixl_pf *pf, struct ixl_vf *vf, uint16_t op,
+ enum i40e_status_code status, const char *file, int line)
+{
+
+ I40E_VC_DEBUG(pf, 1,
+ "Sending NACK (op=%s[%d], err=%s[%d]) to VF-%d from %s:%d\n",
+ ixl_vc_opcode_str(op), op, i40e_stat_str(&pf->hw, status),
+ status, vf->vf_num, file, line);
+ ixl_send_vf_msg(pf, vf, op, status, NULL, 0);
+}
+
+static void
+ixl_vf_version_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_version_info reply;
+
+ if (msg_size != sizeof(struct i40e_virtchnl_version_info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_VERSION,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ vf->version = ((struct i40e_virtchnl_version_info *)msg)->minor;
+
+ reply.major = I40E_VIRTCHNL_VERSION_MAJOR;
+ reply.minor = I40E_VIRTCHNL_VERSION_MINOR;
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_VERSION, I40E_SUCCESS, &reply,
+ sizeof(reply));
+}
+
+static void
+ixl_vf_reset_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+
+ if (msg_size != 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_RESET_VF,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_reset_vf(pf, vf);
+
+ /* No response to a reset message. */
+}
+
+static void
+ixl_vf_get_resources_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vf_resource reply;
+
+ if ((vf->version == 0 && msg_size != 0) ||
+ (vf->version == 1 && msg_size != 4)) {
+ device_printf(pf->dev, "Invalid GET_VF_RESOURCES message size,"
+ " for VF version %d.%d\n", I40E_VIRTCHNL_VERSION_MAJOR,
+ vf->version);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ bzero(&reply, sizeof(reply));
+
+ if (vf->version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
+ reply.vf_offload_flags = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
+ else
+ /* Force VF RSS setup by PF in 1.1+ VFs */
+ reply.vf_offload_flags = *(u32 *)msg & (
+ I40E_VIRTCHNL_VF_OFFLOAD_L2 |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
+ I40E_VIRTCHNL_VF_OFFLOAD_VLAN);
+
+ reply.num_vsis = 1;
+ reply.num_queue_pairs = vf->vsi.num_queues;
+ reply.max_vectors = pf->hw.func_caps.num_msix_vectors_vf;
+ reply.rss_key_size = 52;
+ reply.rss_lut_size = 64;
+ reply.vsi_res[0].vsi_id = vf->vsi.vsi_num;
+ reply.vsi_res[0].vsi_type = I40E_VSI_SRIOV;
+ reply.vsi_res[0].num_queue_pairs = vf->vsi.num_queues;
+ memcpy(reply.vsi_res[0].default_mac_addr, vf->mac, ETHER_ADDR_LEN);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_VF_RESOURCES,
+ I40E_SUCCESS, &reply, sizeof(reply));
+}
+
+static int
+ixl_vf_config_tx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_txq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_txq txq;
+ uint16_t global_queue_num, global_vf_num;
+ enum i40e_status_code status;
+ uint32_t qtx_ctl;
+
+ hw = &pf->hw;
+ global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
+ global_vf_num = hw->func_caps.vf_base_id + vf->vf_num;
+ bzero(&txq, sizeof(txq));
+
+ DDPRINTF(pf->dev, "VF %d: PF TX queue %d / VF TX queue %d (Global VF %d)\n",
+ vf->vf_num, global_queue_num, info->queue_id, global_vf_num);
+
+ status = i40e_clear_lan_tx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
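+	/*
+	 * The HMC queue context stores the ring base address in fixed-size
+	 * units (IXL_TX_CTX_BASE_UNITS in this driver), not in bytes.
+	 */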
+ txq.base = info->dma_ring_addr / IXL_TX_CTX_BASE_UNITS;
+
+ txq.head_wb_ena = info->headwb_enabled;
+ txq.head_wb_addr = info->dma_headwb_addr;
+ txq.qlen = info->ring_len;
+ txq.rdylist = le16_to_cpu(vf->vsi.info.qs_handle[0]);
+ txq.rdylist_act = 0;
+
+ status = i40e_set_lan_tx_queue_context(hw, global_queue_num, &txq);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ qtx_ctl = I40E_QTX_CTL_VF_QUEUE |
+ (hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) |
+ (global_vf_num << I40E_QTX_CTL_VFVM_INDX_SHIFT);
+ wr32(hw, I40E_QTX_CTL(global_queue_num), qtx_ctl);
+ ixl_flush(hw);
+
+ ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, true);
+
+ return (0);
+}
+
+static int
+ixl_vf_config_rx_queue(struct ixl_pf *pf, struct ixl_vf *vf,
+ struct i40e_virtchnl_rxq_info *info)
+{
+ struct i40e_hw *hw;
+ struct i40e_hmc_obj_rxq rxq;
+ uint16_t global_queue_num;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+ global_queue_num = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, info->queue_id);
+ bzero(&rxq, sizeof(rxq));
+
+ DDPRINTF(pf->dev, "VF %d: PF RX queue %d / VF RX queue %d\n",
+ vf->vf_num, global_queue_num, info->queue_id);
+
+ if (info->databuffer_size > IXL_VF_MAX_BUFFER)
+ return (EINVAL);
+
+ if (info->max_pkt_size > IXL_VF_MAX_FRAME ||
+ info->max_pkt_size < ETHER_MIN_LEN)
+ return (EINVAL);
+
+ if (info->splithdr_enabled) {
+ if (info->hdr_size > IXL_VF_MAX_HDR_BUFFER)
+ return (EINVAL);
+
+ rxq.hsplit_0 = info->rx_split_pos &
+ (I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_L2 |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_IP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_TCP_UDP |
+ I40E_HMC_OBJ_RX_HSPLIT_0_SPLIT_SCTP);
+ rxq.hbuff = info->hdr_size >> I40E_RXQ_CTX_HBUFF_SHIFT;
+
+ rxq.dtype = 2;
+ }
+
+ status = i40e_clear_lan_rx_queue_context(hw, global_queue_num);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ rxq.base = info->dma_ring_addr / IXL_RX_CTX_BASE_UNITS;
+ rxq.qlen = info->ring_len;
+
+ rxq.dbuff = info->databuffer_size >> I40E_RXQ_CTX_DBUFF_SHIFT;
+
+ rxq.dsize = 1;
+ rxq.crcstrip = 1;
+ rxq.l2tsel = 1;
+
+ rxq.rxmax = info->max_pkt_size;
+ rxq.tphrdesc_ena = 1;
+ rxq.tphwdesc_ena = 1;
+ rxq.tphdata_ena = 1;
+ rxq.tphhead_ena = 1;
+ rxq.lrxqthresh = 2;
+ rxq.prefena = 1;
+
+ status = i40e_set_lan_rx_queue_context(hw, global_queue_num, &rxq);
+ if (status != I40E_SUCCESS)
+ return (EINVAL);
+
+ ixl_pf_qmgr_mark_queue_configured(&vf->qtag, info->queue_id, false);
+
+ return (0);
+}
+
+static void
+ixl_vf_config_vsi_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vsi_queue_config_info *info;
+ struct i40e_virtchnl_queue_pair_info *pair;
+ uint16_t expected_msg_size;
+ int i;
+
+ if (msg_size < sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->num_queue_pairs == 0 || info->num_queue_pairs > vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: invalid # of qpairs (msg has %d, VSI has %d)\n",
+ vf->vf_num, info->num_queue_pairs, vf->vsi.num_queues);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ expected_msg_size = sizeof(*info) + info->num_queue_pairs * sizeof(*pair);
+ if (msg_size != expected_msg_size) {
+ device_printf(pf->dev, "VF %d: size of recvd message (%d) does not match expected size (%d)\n",
+ vf->vf_num, msg_size, expected_msg_size);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, info->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < info->num_queue_pairs; i++) {
+ pair = &info->qpair[i];
+
+ if (pair->txq.vsi_id != vf->vsi.vsi_num ||
+ pair->rxq.vsi_id != vf->vsi.vsi_num ||
+ pair->txq.queue_id != pair->rxq.queue_id ||
+ pair->txq.queue_id >= vf->vsi.num_queues) {
+
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_tx_queue(pf, vf, &pair->txq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (ixl_vf_config_rx_queue(pf, vf, &pair->rxq) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES);
+}
+
+static void
+ixl_vf_set_qctl(struct ixl_pf *pf,
+ const struct i40e_virtchnl_vector_map *vector,
+ enum i40e_queue_type cur_type, uint16_t cur_queue,
+ enum i40e_queue_type *last_type, uint16_t *last_queue)
+{
+ uint32_t offset, qctl;
+ uint16_t itr_indx;
+
+ if (cur_type == I40E_QUEUE_TYPE_RX) {
+ offset = I40E_QINT_RQCTL(cur_queue);
+ itr_indx = vector->rxitr_idx;
+ } else {
+ offset = I40E_QINT_TQCTL(cur_queue);
+ itr_indx = vector->txitr_idx;
+ }
+
+ qctl = htole32((vector->vector_id << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (*last_type << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT) |
+ (*last_queue << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (itr_indx << I40E_QINT_RQCTL_ITR_INDX_SHIFT));
+
+ wr32(&pf->hw, offset, qctl);
+
+ *last_type = cur_type;
+ *last_queue = cur_queue;
+}
+
+static void
+ixl_vf_config_vector(struct ixl_pf *pf, struct ixl_vf *vf,
+ const struct i40e_virtchnl_vector_map *vector)
+{
+ struct i40e_hw *hw;
+ u_int qindex;
+ enum i40e_queue_type type, last_type;
+ uint32_t lnklst_reg;
+ uint16_t rxq_map, txq_map, cur_queue, last_queue;
+
+ hw = &pf->hw;
+
+ rxq_map = vector->rxq_map;
+ txq_map = vector->txq_map;
+
+ last_queue = IXL_END_OF_INTR_LNKLST;
+ last_type = I40E_QUEUE_TYPE_RX;
+
+ /*
+ * The datasheet says to optimize performance, RX queues and TX queues
+ * should be interleaved in the interrupt linked list, so we process
+ * both at once here.
+ */
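+	/*
+	 * A worked example: with txq_map = rxq_map = 0x3, the loop below
+	 * processes TX0, RX0, TX1, RX1, and each write points the new
+	 * queue's NEXTQ at the previously processed queue.  The hardware
+	 * then walks the list from the LNKLST head (RX1) back to TX0,
+	 * alternating queue types.
+	 */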
+ while ((rxq_map != 0) || (txq_map != 0)) {
+ if (txq_map != 0) {
+ qindex = ffs(txq_map) - 1;
+ type = I40E_QUEUE_TYPE_TX;
+ cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ txq_map &= ~(1 << qindex);
+ }
+
+ if (rxq_map != 0) {
+ qindex = ffs(rxq_map) - 1;
+ type = I40E_QUEUE_TYPE_RX;
+ cur_queue = ixl_pf_qidx_from_vsi_qidx(&vf->qtag, qindex);
+ ixl_vf_set_qctl(pf, vector, type, cur_queue,
+ &last_type, &last_queue);
+ rxq_map &= ~(1 << qindex);
+ }
+ }
+
+ if (vector->vector_id == 0)
+ lnklst_reg = I40E_VPINT_LNKLST0(vf->vf_num);
+ else
+ lnklst_reg = IXL_VPINT_LNKLSTN_REG(hw, vector->vector_id,
+ vf->vf_num);
+ wr32(hw, lnklst_reg,
+ (last_queue << I40E_VPINT_LNKLST0_FIRSTQ_INDX_SHIFT) |
+ (last_type << I40E_VPINT_LNKLST0_FIRSTQ_TYPE_SHIFT));
+
+ ixl_flush(hw);
+}
+
+static void
+ixl_vf_config_irq_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_irq_map_info *map;
+ struct i40e_virtchnl_vector_map *vector;
+ struct i40e_hw *hw;
+ int i, largest_txq, largest_rxq;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*map)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ map = msg;
+ if (map->num_vectors == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (msg_size != sizeof(*map) + map->num_vectors * sizeof(*vector)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < map->num_vectors; i++) {
+ vector = &map->vecmap[i];
+
+ if ((vector->vector_id >= hw->func_caps.num_msix_vectors_vf) ||
+ vector->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (vector->rxq_map != 0) {
+ largest_rxq = fls(vector->rxq_map) - 1;
+ if (largest_rxq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->txq_map != 0) {
+ largest_txq = fls(vector->txq_map) - 1;
+ if (largest_txq >= vf->vsi.num_queues) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (vector->rxitr_idx > IXL_MAX_ITR_IDX ||
+ vector->txitr_idx > IXL_MAX_ITR_IDX) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_vf_config_vector(pf, vf, vector);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP);
+}
+
+static void
+ixl_vf_enable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error = 0;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Enable TX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->tx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
+ continue;
+ /* Warn if this queue is already marked as enabled */
+ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true))
+ device_printf(pf->dev, "VF %d: TX ring %d is already enabled!\n",
+ vf->vf_num, i);
+
+ error = ixl_enable_tx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, true);
+ }
+ }
+
+ /* Enable RX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->rx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
+ continue;
+ /* Warn if this queue is already marked as enabled */
+ if (ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false))
+ device_printf(pf->dev, "VF %d: RX ring %d is already enabled!\n",
+ vf->vf_num, i);
+ error = ixl_enable_rx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_enabled(&vf->qtag, i, false);
+ }
+ }
+
+ if (error) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ENABLE_QUEUES);
+}
+
+static void
+ixl_vf_disable_queues_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *select;
+ int error = 0;
+
+ if (msg_size != sizeof(*select)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ select = msg;
+ if (select->vsi_id != vf->vsi.vsi_num ||
+ select->rx_queues == 0 || select->tx_queues == 0) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Disable TX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->tx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: TX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, true))
+ continue;
+ /* Warn if this queue is already marked as disabled */
+ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, true)) {
+ device_printf(pf->dev, "VF %d: TX ring %d is already disabled!\n",
+ vf->vf_num, i);
+ continue;
+ }
+ error = ixl_disable_tx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, true);
+ }
+ }
+
+	/* Disable RX rings selected by the VF */
+ for (int i = 0; i < 32; i++) {
+ if ((1 << i) & select->rx_queues) {
+ /* Warn if queue is out of VF allocation range */
+ if (i >= vf->vsi.num_queues) {
+ device_printf(pf->dev, "VF %d: RX ring %d is outside of VF VSI allocation!\n",
+ vf->vf_num, i);
+ break;
+ }
+ /* Skip this queue if it hasn't been configured */
+ if (!ixl_pf_qmgr_is_queue_configured(&vf->qtag, i, false))
+ continue;
+ /* Warn if this queue is already marked as disabled */
+ if (!ixl_pf_qmgr_is_queue_enabled(&vf->qtag, i, false)) {
+ device_printf(pf->dev, "VF %d: RX ring %d is already disabled!\n",
+ vf->vf_num, i);
+ continue;
+ }
+ error = ixl_disable_rx_ring(pf, &vf->qtag, i);
+ if (error)
+ break;
+ else
+ ixl_pf_qmgr_mark_queue_disabled(&vf->qtag, i, false);
+ }
+ }
+
+ if (error) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES,
+ I40E_ERR_TIMEOUT);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DISABLE_QUEUES);
+}
+
+static bool
+ixl_zero_mac(const uint8_t *addr)
+{
+ uint8_t zero[ETHER_ADDR_LEN] = {0, 0, 0, 0, 0, 0};
+
+ return (cmp_etheraddr(addr, zero));
+}
+
+static bool
+ixl_bcast_mac(const uint8_t *addr)
+{
+
+ return (cmp_etheraddr(addr, ixl_bcast_addr));
+}
+
+static int
+ixl_vf_mac_valid(struct ixl_vf *vf, const uint8_t *addr)
+{
+
+ if (ixl_zero_mac(addr) || ixl_bcast_mac(addr))
+ return (EINVAL);
+
+ /*
+ * If the VF is not allowed to change its MAC address, don't let it
+ * set a MAC filter for an address that is not a multicast address and
+ * is not its assigned MAC.
+ */
+ if (!(vf->vf_flags & VF_FLAG_SET_MAC_CAP) &&
+ !(ETHER_IS_MULTICAST(addr) || cmp_etheraddr(addr, vf->mac)))
+ return (EPERM);
+
+ return (0);
+}
+
+static void
+ixl_vf_add_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ struct ixl_vsi *vsi;
+ int i;
+ size_t expected_size;
+
+ vsi = &vf->vsi;
+
+ if (msg_size < sizeof(*addr_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vsi->vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ if (ixl_vf_mac_valid(vf, addr_list->list[i].addr) != 0) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_add_filter(vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS);
+}
+
+static void
+ixl_vf_del_mac_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_ether_addr_list *addr_list;
+ struct i40e_virtchnl_ether_addr *addr;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*addr_list)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ addr_list = msg;
+ expected_size = sizeof(*addr_list) +
+ addr_list->num_elements * sizeof(*addr);
+
+ if (addr_list->num_elements == 0 ||
+ addr_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ if (ixl_zero_mac(addr->addr) || ixl_bcast_mac(addr->addr)) {
+ i40e_send_vf_nack(pf, vf,
+			    I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS, I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ for (i = 0; i < addr_list->num_elements; i++) {
+ addr = &addr_list->list[i];
+ ixl_del_filter(&vf->vsi, addr->addr, IXL_VLAN_ANY);
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS);
+}
+
+static enum i40e_status_code
+ixl_vf_enable_vlan_strip(struct ixl_pf *pf, struct ixl_vf *vf)
+{
+ struct i40e_vsi_context vsi_ctx;
+
+ vsi_ctx.seid = vf->vsi.seid;
+
+ bzero(&vsi_ctx.info, sizeof(vsi_ctx.info));
+ vsi_ctx.info.valid_sections = htole16(I40E_AQ_VSI_PROP_VLAN_VALID);
+ vsi_ctx.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL |
+ I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ return (i40e_aq_update_vsi_params(&pf->hw, &vsi_ctx, NULL));
+}
+
+static void
+ixl_vf_add_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ enum i40e_status_code code;
+ size_t expected_size;
+ int i;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ code = ixl_vf_enable_vlan_strip(pf, vf);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN,
+ I40E_ERR_PARAM);
+		return;
+	}
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_add_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_ADD_VLAN);
+}
+
+static void
+ixl_vf_del_vlan_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_vlan_filter_list *filter_list;
+ int i;
+ size_t expected_size;
+
+ if (msg_size < sizeof(*filter_list)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ filter_list = msg;
+ expected_size = sizeof(*filter_list) +
+ filter_list->num_elements * sizeof(uint16_t);
+ if (filter_list->num_elements == 0 ||
+ filter_list->vsi_id != vf->vsi.vsi_num ||
+ msg_size != expected_size) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++) {
+ if (filter_list->vlan_id[i] > EVL_VLID_MASK) {
+			i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_VLAN_CAP)) {
+		i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ for (i = 0; i < filter_list->num_elements; i++)
+ ixl_del_filter(&vf->vsi, vf->mac, filter_list->vlan_id[i]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_DEL_VLAN);
+}
+
+static void
+ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf,
+ void *msg, uint16_t msg_size)
+{
+ struct i40e_virtchnl_promisc_info *info;
+ enum i40e_status_code code;
+
+ if (msg_size != sizeof(*info)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ info = msg;
+ if (info->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_unicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_UNICAST_PROMISC, NULL, TRUE);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ code = i40e_aq_set_vsi_multicast_promiscuous(&pf->hw, info->vsi_id,
+ info->flags & I40E_FLAG_VF_MULTICAST_PROMISC, NULL);
+ if (code != I40E_SUCCESS) {
+ i40e_send_vf_nack(pf, vf,
+ I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, code);
+ return;
+ }
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE);
+}
+
+static void
+ixl_vf_get_stats_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_virtchnl_queue_select *queue;
+
+ if (msg_size != sizeof(*queue)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ queue = msg;
+ if (queue->vsi_id != vf->vsi.vsi_num) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ ixl_update_eth_stats(&vf->vsi);
+
+ ixl_send_vf_msg(pf, vf, I40E_VIRTCHNL_OP_GET_STATS,
+ I40E_SUCCESS, &vf->vsi.eth_stats, sizeof(vf->vsi.eth_stats));
+}
+
+static void
+ixl_vf_config_rss_key_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_key *key;
+ struct i40e_aqc_get_set_rss_key_data key_data;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*key)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ key = msg;
+
+ if (key->key_len > 52) {
+ device_printf(pf->dev, "VF %d: Key size in msg (%d) is greater than max key size (%d)\n",
+ vf->vf_num, key->key_len, 52);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (key->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, key->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+	/*
+	 * Fill out the hash key using a MAC-dependent method: the X722
+	 * takes a 52-byte key (a 40-byte standard key plus a 12-byte
+	 * extended hash key) via an admin queue command, while other MACs
+	 * take the key written directly into the VF's HKEY registers.
+	 */
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(&key_data, sizeof(key_data));
+ if (key->key_len <= 40)
+ bcopy(key->key, key_data.standard_rss_key, key->key_len);
+ else {
+ bcopy(key->key, key_data.standard_rss_key, 40);
+ bcopy(&key->key[40], key_data.extended_hash_key, key->key_len - 40);
+ }
+ status = i40e_aq_set_rss_key(hw, vf->vsi.vsi_num, &key_data);
+ if (status) {
+ device_printf(pf->dev, "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ I40E_ERR_ADMIN_QUEUE_ERROR);
+ return;
+ }
+ } else {
+ for (int i = 0; i < (key->key_len / 4); i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HKEY1(i, vf->vf_num), ((u32 *)key->key)[i]);
+ }
+
+ DDPRINTF(pf->dev, "VF %d: Programmed key starting with 0x%x ok!",
+ vf->vf_num, key->key[0]);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY);
+}
+
+static void
+ixl_vf_config_rss_lut_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_lut *lut;
+ enum i40e_status_code status;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*lut)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ lut = msg;
+
+ if (lut->lut_entries > 64) {
+ device_printf(pf->dev, "VF %d: # of LUT entries in msg (%d) is greater than max (%d)\n",
+ vf->vf_num, lut->lut_entries, 64);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ if (lut->vsi_id != vf->vsi.vsi_num) {
+ device_printf(pf->dev, "VF %d: VSI id in recvd message (%d) does not match expected id (%d)\n",
+ vf->vf_num, lut->vsi_id, vf->vsi.vsi_num);
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ /* Fill out LUT using MAC-dependent method */
+ if (hw->mac.type == I40E_MAC_X722) {
+ status = i40e_aq_set_rss_lut(hw, vf->vsi.vsi_num, false, lut->lut, lut->lut_entries);
+ if (status) {
+ device_printf(pf->dev, "i40e_aq_set_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ I40E_ERR_ADMIN_QUEUE_ERROR);
+ return;
+ }
+ } else {
+ for (int i = 0; i < (lut->lut_entries / 4); i++)
+ i40e_write_rx_ctl(hw, I40E_VFQF_HLUT1(i, vf->vf_num), ((u32 *)lut->lut)[i]);
+ }
+
+ DDPRINTF(pf->dev, "VF %d: Programmed LUT starting with 0x%x and length %d ok!",
+ vf->vf_num, lut->lut[0], lut->lut_entries);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT);
+}
+
+static void
+ixl_vf_set_rss_hena_msg(struct ixl_pf *pf, struct ixl_vf *vf, void *msg,
+ uint16_t msg_size)
+{
+ struct i40e_hw *hw;
+ struct i40e_virtchnl_rss_hena *hena;
+
+ hw = &pf->hw;
+
+ if (msg_size < sizeof(*hena)) {
+ i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ I40E_ERR_PARAM);
+ return;
+ }
+
+ hena = msg;
+
+ /* Set HENA */
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(0, vf->vf_num), (u32)hena->hena);
+ i40e_write_rx_ctl(hw, I40E_VFQF_HENA1(1, vf->vf_num), (u32)(hena->hena >> 32));
+
+ DDPRINTF(pf->dev, "VF %d: Programmed HENA with 0x%016lx",
+ vf->vf_num, hena->hena);
+
+ ixl_send_vf_ack(pf, vf, I40E_VIRTCHNL_OP_SET_RSS_HENA);
+}
+
+void
+ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event)
+{
+ struct ixl_vf *vf;
+ void *msg;
+ uint16_t vf_num, msg_size;
+ uint32_t opcode;
+
+ vf_num = le16toh(event->desc.retval) - pf->hw.func_caps.vf_base_id;
+ opcode = le32toh(event->desc.cookie_high);
+
+ if (vf_num >= pf->num_vfs) {
+ device_printf(pf->dev, "Got msg from illegal VF: %d\n", vf_num);
+ return;
+ }
+
+ vf = &pf->vfs[vf_num];
+ msg = event->msg_buf;
+ msg_size = event->msg_len;
+
+ I40E_VC_DEBUG(pf, ixl_vc_opcode_level(opcode),
+ "Got msg %s(%d) from%sVF-%d of size %d\n",
+ ixl_vc_opcode_str(opcode), opcode,
+ (vf->vf_flags & VF_FLAG_ENABLED) ? " " : " disabled ",
+ vf_num, msg_size);
+
+ /* This must be a stray msg from a previously destroyed VF. */
+ if (!(vf->vf_flags & VF_FLAG_ENABLED))
+ return;
+
+ switch (opcode) {
+ case I40E_VIRTCHNL_OP_VERSION:
+ ixl_vf_version_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_RESET_VF:
+ ixl_vf_reset_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
+ ixl_vf_get_resources_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
+ ixl_vf_config_vsi_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_IRQ_MAP:
+ ixl_vf_config_irq_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ENABLE_QUEUES:
+ ixl_vf_enable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
+ ixl_vf_disable_queues_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_ETHER_ADDRESS:
+ ixl_vf_add_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_ETHER_ADDRESS:
+ ixl_vf_del_mac_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_ADD_VLAN:
+ ixl_vf_add_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_DEL_VLAN:
+ ixl_vf_del_vlan_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE:
+ ixl_vf_config_promisc_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_GET_STATS:
+ ixl_vf_get_stats_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ixl_vf_config_rss_key_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ixl_vf_config_rss_lut_msg(pf, vf, msg, msg_size);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ixl_vf_set_rss_hena_msg(pf, vf, msg, msg_size);
+ break;
+
+ /* These two opcodes have been superseded by CONFIG_VSI_QUEUES. */
+ case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
+ case I40E_VIRTCHNL_OP_CONFIG_RX_QUEUE:
+ default:
+ i40e_send_vf_nack(pf, vf, opcode, I40E_ERR_NOT_IMPLEMENTED);
+ break;
+ }
+}
+
+/* Handle any VFs that have reset themselves via a Function Level Reset (FLR). */
+void
+ixl_handle_vflr(void *arg, int pending)
+{
+ struct ixl_pf *pf;
+ struct ixl_vf *vf;
+ struct i40e_hw *hw;
+ uint16_t global_vf_num;
+ uint32_t vflrstat_index, vflrstat_mask, vflrstat, icr0;
+ int i;
+
+ pf = arg;
+ hw = &pf->hw;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ global_vf_num = hw->func_caps.vf_base_id + i;
+
+ vf = &pf->vfs[i];
+ if (!(vf->vf_flags & VF_FLAG_ENABLED))
+ continue;
+
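+		/*
+		 * VFLRSTAT is a bit array spread across several registers;
+		 * assuming the usual 32-bits-per-register layout, global
+		 * VF 40 would map to register index 1, bit 8.
+		 */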
+ vflrstat_index = IXL_GLGEN_VFLRSTAT_INDEX(global_vf_num);
+ vflrstat_mask = IXL_GLGEN_VFLRSTAT_MASK(global_vf_num);
+ vflrstat = rd32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index));
+ if (vflrstat & vflrstat_mask) {
+ wr32(hw, I40E_GLGEN_VFLRSTAT(vflrstat_index),
+ vflrstat_mask);
+
+ ixl_reinit_vf(pf, vf);
+ }
+ }
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0_ENA);
+ icr0 |= I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, icr0);
+ ixl_flush(hw);
+
+ IXL_PF_UNLOCK(pf);
+}
+
+static int
+ixl_adminq_err_to_errno(enum i40e_admin_queue_err err)
+{
+
+ switch (err) {
+ case I40E_AQ_RC_EPERM:
+ return (EPERM);
+ case I40E_AQ_RC_ENOENT:
+ return (ENOENT);
+ case I40E_AQ_RC_ESRCH:
+ return (ESRCH);
+ case I40E_AQ_RC_EINTR:
+ return (EINTR);
+ case I40E_AQ_RC_EIO:
+ return (EIO);
+ case I40E_AQ_RC_ENXIO:
+ return (ENXIO);
+ case I40E_AQ_RC_E2BIG:
+ return (E2BIG);
+ case I40E_AQ_RC_EAGAIN:
+ return (EAGAIN);
+ case I40E_AQ_RC_ENOMEM:
+ return (ENOMEM);
+ case I40E_AQ_RC_EACCES:
+ return (EACCES);
+ case I40E_AQ_RC_EFAULT:
+ return (EFAULT);
+ case I40E_AQ_RC_EBUSY:
+ return (EBUSY);
+ case I40E_AQ_RC_EEXIST:
+ return (EEXIST);
+ case I40E_AQ_RC_EINVAL:
+ return (EINVAL);
+ case I40E_AQ_RC_ENOTTY:
+ return (ENOTTY);
+ case I40E_AQ_RC_ENOSPC:
+ return (ENOSPC);
+ case I40E_AQ_RC_ENOSYS:
+ return (ENOSYS);
+ case I40E_AQ_RC_ERANGE:
+ return (ERANGE);
+ case I40E_AQ_RC_EFLUSHED:
+ return (EINVAL); /* No exact equivalent in errno.h */
+ case I40E_AQ_RC_BAD_ADDR:
+ return (EFAULT);
+ case I40E_AQ_RC_EMODE:
+ return (EPERM);
+ case I40E_AQ_RC_EFBIG:
+ return (EFBIG);
+ default:
+ return (EINVAL);
+ }
+}
+
+int
+ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *pf_vsi;
+ enum i40e_status_code ret;
+ int i, error;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ pf_vsi = &pf->vsi;
+
+ IXL_PF_LOCK(pf);
+ pf->vfs = malloc(sizeof(struct ixl_vf) * num_vfs, M_IXL, M_NOWAIT |
+ M_ZERO);
+
+ if (pf->vfs == NULL) {
+ error = ENOMEM;
+ goto fail;
+ }
+
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_init(&pf->vfs[i].ctx);
+
+ ret = i40e_aq_add_veb(hw, pf_vsi->uplink_seid, pf_vsi->seid,
+ 1, FALSE, &pf->veb_seid, FALSE, NULL);
+ if (ret != I40E_SUCCESS) {
+ error = ixl_adminq_err_to_errno(hw->aq.asq_last_status);
+ device_printf(dev, "add_veb failed; code=%d error=%d", ret,
+ error);
+ goto fail;
+ }
+
+ ixl_enable_adminq(hw);
+
+ pf->num_vfs = num_vfs;
+ IXL_PF_UNLOCK(pf);
+ return (0);
+
+fail:
+ free(pf->vfs, M_IXL);
+ pf->vfs = NULL;
+ IXL_PF_UNLOCK(pf);
+ return (error);
+}
+
+void
+ixl_iov_uninit(device_t dev)
+{
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ struct ixl_vsi *vsi;
+ struct ifnet *ifp;
+ struct ixl_vf *vfs;
+ int i, num_vfs;
+
+ pf = device_get_softc(dev);
+ hw = &pf->hw;
+ vsi = &pf->vsi;
+ ifp = vsi->ifp;
+
+ IXL_PF_LOCK(pf);
+ for (i = 0; i < pf->num_vfs; i++) {
+ if (pf->vfs[i].vsi.seid != 0)
+ i40e_aq_delete_element(hw, pf->vfs[i].vsi.seid, NULL);
+ ixl_pf_qmgr_release(&pf->qmgr, &pf->vfs[i].qtag);
+ DDPRINTF(dev, "VF %d: %d released\n",
+ i, pf->vfs[i].qtag.num_allocated);
+ DDPRINTF(dev, "Unallocated total: %d\n", ixl_pf_qmgr_get_num_free(&pf->qmgr));
+ }
+
+ if (pf->veb_seid != 0) {
+ i40e_aq_delete_element(hw, pf->veb_seid, NULL);
+ pf->veb_seid = 0;
+ }
+
+ if ((if_getdrvflags(ifp) & IFF_DRV_RUNNING) == 0) {
+ ixl_disable_intr(vsi);
+ ixl_flush(hw);
+ }
+
+ vfs = pf->vfs;
+ num_vfs = pf->num_vfs;
+
+ pf->vfs = NULL;
+ pf->num_vfs = 0;
+ IXL_PF_UNLOCK(pf);
+
+ /* Do this after the unlock as sysctl_ctx_free might sleep. */
+ for (i = 0; i < num_vfs; i++)
+ sysctl_ctx_free(&vfs[i].ctx);
+ free(vfs, M_IXL);
+}
+
+static int
+ixl_vf_reserve_queues(struct ixl_pf *pf, struct ixl_vf *vf, int num_queues)
+{
+ device_t dev = pf->dev;
+ int error;
+
+ /* Validate, and clamp value if invalid */
+ if (num_queues < 1 || num_queues > 16)
+ device_printf(dev, "Invalid num-queues (%d) for VF %d\n",
+ num_queues, vf->vf_num);
+ if (num_queues < 1) {
+ device_printf(dev, "Setting VF %d num-queues to 1\n", vf->vf_num);
+ num_queues = 1;
+ } else if (num_queues > 16) {
+ device_printf(dev, "Setting VF %d num-queues to 16\n", vf->vf_num);
+ num_queues = 16;
+ }
+ error = ixl_pf_qmgr_alloc_scattered(&pf->qmgr, num_queues, &vf->qtag);
+ if (error) {
+ device_printf(dev, "Error allocating %d queues for VF %d's VSI\n",
+ num_queues, vf->vf_num);
+ return (ENOSPC);
+ }
+
+ DDPRINTF(dev, "VF %d: %d allocated, %d active",
+ vf->vf_num, vf->qtag.num_allocated, vf->qtag.num_active);
+ DDPRINTF(dev, "Unallocated total: %d", ixl_pf_qmgr_get_num_free(&pf->qmgr));
+
+ return (0);
+}
+
+int
+ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params)
+{
+ char sysctl_name[QUEUE_NAME_LEN];
+ struct ixl_pf *pf;
+ struct ixl_vf *vf;
+ const void *mac;
+ size_t size;
+ int error;
+ int vf_num_queues;
+
+ pf = device_get_softc(dev);
+ vf = &pf->vfs[vfnum];
+
+ IXL_PF_LOCK(pf);
+ vf->vf_num = vfnum;
+
+ vf->vsi.back = pf;
+ vf->vf_flags = VF_FLAG_ENABLED;
+ SLIST_INIT(&vf->vsi.ftl);
+
+ /* Reserve queue allocation from PF */
+ vf_num_queues = nvlist_get_number(params, "num-queues");
+ error = ixl_vf_reserve_queues(pf, vf, vf_num_queues);
+ if (error != 0)
+ goto out;
+
+ error = ixl_vf_setup_vsi(pf, vf);
+ if (error != 0)
+ goto out;
+
+ if (nvlist_exists_binary(params, "mac-addr")) {
+ mac = nvlist_get_binary(params, "mac-addr", &size);
+ bcopy(mac, vf->mac, ETHER_ADDR_LEN);
+
+ if (nvlist_get_bool(params, "allow-set-mac"))
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+ } else
+ /*
+ * If the administrator has not specified a MAC address then
+ * we must allow the VF to choose one.
+ */
+ vf->vf_flags |= VF_FLAG_SET_MAC_CAP;
+
+ if (nvlist_get_bool(params, "mac-anti-spoof"))
+ vf->vf_flags |= VF_FLAG_MAC_ANTI_SPOOF;
+
+ if (nvlist_get_bool(params, "allow-promisc"))
+ vf->vf_flags |= VF_FLAG_PROMISC_CAP;
+
+ vf->vf_flags |= VF_FLAG_VLAN_CAP;
+
+ ixl_reset_vf(pf, vf);
+out:
+ IXL_PF_UNLOCK(pf);
+ if (error == 0) {
+ snprintf(sysctl_name, sizeof(sysctl_name), "vf%d", vfnum);
+ ixl_add_vsi_sysctls(pf, &vf->vsi, &vf->ctx, sysctl_name);
+ }
+
+ return (error);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_iov.h b/sys/dev/ixl/ixl_pf_iov.h
new file mode 100644
index 000000000000..ae8abc208d4b
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_iov.h
@@ -0,0 +1,62 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#ifndef _IXL_PF_IOV_H_
+#define _IXL_PF_IOV_H_
+
+#include "ixl_pf.h"
+
+#include <sys/nv.h>
+#include <sys/iov_schema.h>
+#include <dev/pci/pci_iov.h>
+
+/* Public functions */
+
+/*
+ * These three are DEVMETHODs required for SR-IOV PF support.
+ */
+int ixl_iov_init(device_t dev, uint16_t num_vfs, const nvlist_t *params);
+void ixl_iov_uninit(device_t dev);
+int ixl_add_vf(device_t dev, uint16_t vfnum, const nvlist_t *params);
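+
+/*
+ * A minimal sketch of how these are expected to be wired into the PF
+ * driver's method table (the real table lives in the ixl attach code):
+ *
+ *	DEVMETHOD(pci_iov_init, ixl_iov_init),
+ *	DEVMETHOD(pci_iov_uninit, ixl_iov_uninit),
+ *	DEVMETHOD(pci_iov_add_vf, ixl_add_vf),
+ */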
+
+/*
+ * The standard PF driver needs to call these during normal execution when
+ * SR-IOV mode is active.
+ */
+void ixl_initialize_sriov(struct ixl_pf *pf);
+void ixl_handle_vf_msg(struct ixl_pf *pf, struct i40e_arq_event_info *event);
+void ixl_handle_vflr(void *arg, int pending);
+
+#endif /* _IXL_PF_IOV_H_ */
diff --git a/sys/dev/ixl/ixl_pf_main.c b/sys/dev/ixl/ixl_pf_main.c
new file mode 100644
index 000000000000..d8da4cfee106
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_main.c
@@ -0,0 +1,5557 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf.h"
+
+#ifdef PCI_IOV
+#include "ixl_pf_iov.h"
+#endif
+
+#ifdef DEV_NETMAP
+#include <net/netmap.h>
+#include <sys/selinfo.h>
+#include <dev/netmap/netmap_kern.h>
+#endif /* DEV_NETMAP */
+
+static int ixl_setup_queue(struct ixl_queue *, struct ixl_pf *, int);
+
+/* Sysctls */
+static int ixl_set_flowcntl(SYSCTL_HANDLER_ARGS);
+static int ixl_set_advertise(SYSCTL_HANDLER_ARGS);
+static int ixl_current_speed(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS);
+
+/* Debug Sysctls */
+static int ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS);
+static int ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS);
+
+void
+ixl_dbg(struct ixl_pf *pf, enum ixl_dbg_mask mask, char *fmt, ...)
+{
+ va_list args;
+
+ if (!(mask & pf->dbg_mask))
+ return;
+
+ /* device_printf() is variadic and cannot take a va_list, so
+ * print the device name prefix ourselves and hand the argument
+ * list to vprintf() instead. */
+ device_print_prettyname(pf->dev);
+ va_start(args, fmt);
+ vprintf(fmt, args);
+ va_end(args);
+}
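+
+/*
+** Usage sketch (illustrative): with pf->dbg_mask containing
+** IXL_DBG_SWITCH_INFO, a call such as
+** ixl_dbg(pf, IXL_DBG_SWITCH_INFO, "seid: %d\n", seid);
+** prints with the usual device name prefix, while calls whose mask
+** bit is not set in pf->dbg_mask return silently.
+*/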
+
+/*
+** Put the FW, API, NVM, EEtrackID, and OEM version information into a string
+*/
+void
+ixl_nvm_version_str(struct i40e_hw *hw, struct sbuf *buf)
+{
+ u8 oem_ver = (u8)(hw->nvm.oem_ver >> 24);
+ u16 oem_build = (u16)((hw->nvm.oem_ver >> 16) & 0xFFFF);
+ u8 oem_patch = (u8)(hw->nvm.oem_ver & 0xFF);
+
+ sbuf_printf(buf,
+ "fw %d.%d.%05d api %d.%d nvm %x.%02x etid %08x oem %d.%d.%d",
+ hw->aq.fw_maj_ver, hw->aq.fw_min_ver, hw->aq.fw_build,
+ hw->aq.api_maj_ver, hw->aq.api_min_ver,
+ (hw->nvm.version & IXL_NVM_VERSION_HI_MASK) >>
+ IXL_NVM_VERSION_HI_SHIFT,
+ (hw->nvm.version & IXL_NVM_VERSION_LO_MASK) >>
+ IXL_NVM_VERSION_LO_SHIFT,
+ hw->nvm.eetrack,
+ oem_ver, oem_build, oem_patch);
+}
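+
+/*
+** With the format above, the resulting string looks like
+** "fw 4.40.35115 api 1.4 nvm 4.53 etid 80001cd8 oem 1.262.0"
+** (values purely illustrative, not from real hardware).
+*/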
+
+void
+ixl_print_nvm_version(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *sbuf;
+
+ sbuf = sbuf_new_auto();
+ ixl_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ device_printf(dev, "%s\n", sbuf_data(sbuf));
+ sbuf_delete(sbuf);
+}
+
+static void
+ixl_configure_tx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->tx_itr_setting = pf->tx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR, i),
+ vsi->tx_itr_setting);
+ txr->itr = vsi->tx_itr_setting;
+ txr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+static void
+ixl_configure_rx_itr(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+
+ vsi->rx_itr_setting = pf->rx_itr;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct rx_ring *rxr = &que->rxr;
+
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR, i),
+ vsi->rx_itr_setting);
+ rxr->itr = vsi->rx_itr_setting;
+ rxr->latency = IXL_AVE_LATENCY;
+ }
+}
+
+/*
+ * Write PF ITR values to queue ITR registers.
+ */
+void
+ixl_configure_itr(struct ixl_pf *pf)
+{
+ ixl_configure_tx_itr(pf);
+ ixl_configure_rx_itr(pf);
+}
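+
+/*
+** Note: the hardware interprets the ITR registers written above in
+** 2-usec units (see the comment in ixl_configure_intr0_msix() below),
+** so, for example, a setting of 0x3E throttles a vector to roughly
+** one interrupt every 62 * 2 = 124 usecs.
+*/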
+
+
+/*********************************************************************
+ * Init entry point
+ *
+ * This routine is used in two ways. It is used by the stack as
+ * the init entry point in the network interface structure. It is
+ * also used by the driver as a hw/sw initialization routine to
+ * get to a consistent state.
+ *
+ * Returns nothing; failures are reported via device_printf().
+ **********************************************************************/
+void
+ixl_init_locked(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+ struct i40e_filter_control_settings filter;
+ u8 tmpaddr[ETHER_ADDR_LEN];
+ int ret;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+ INIT_DEBUGOUT("ixl_init_locked: begin");
+
+ ixl_stop_locked(pf);
+
+ /* Get the latest mac address... User might use a LAA */
+ bcopy(IF_LLADDR(vsi->ifp), tmpaddr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ if (!cmp_etheraddr(hw->mac.addr, tmpaddr) &&
+ (i40e_validate_mac_addr(tmpaddr) == I40E_SUCCESS)) {
+ ixl_del_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+ bcopy(tmpaddr, hw->mac.addr,
+ I40E_ETH_LENGTH_OF_ADDRESS);
+ ret = i40e_aq_mac_address_write(hw,
+ I40E_AQC_WRITE_TYPE_LAA_ONLY,
+ hw->mac.addr, NULL);
+ if (ret) {
+ device_printf(dev, "LLA address"
+ "change failed!!\n");
+ return;
+ }
+ }
+
+ ixl_add_filter(vsi, hw->mac.addr, IXL_VLAN_ANY);
+
+ /* Set the various hardware offload abilities */
+ ifp->if_hwassist = 0;
+ if (ifp->if_capenable & IFCAP_TSO)
+ ifp->if_hwassist |= CSUM_TSO;
+ if (ifp->if_capenable & IFCAP_TXCSUM)
+ ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP);
+ if (ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ ifp->if_hwassist |= (CSUM_TCP_IPV6 | CSUM_UDP_IPV6);
+
+ /* Set up the device filtering */
+ bzero(&filter, sizeof(filter));
+ filter.enable_ethtype = TRUE;
+ filter.enable_macvlan = TRUE;
+ filter.enable_fdir = FALSE;
+ filter.hash_lut_size = I40E_HASH_LUT_SIZE_512;
+ if (i40e_set_filter_control(hw, &filter))
+ device_printf(dev, "i40e_set_filter_control() failed\n");
+
+ /* Prepare the VSI: rings, hmc contexts, etc... */
+ if (ixl_initialize_vsi(vsi)) {
+ device_printf(dev, "initialize vsi failed!!\n");
+ return;
+ }
+
+ /* Set up RSS */
+ ixl_config_rss(pf);
+
+ /* Add protocol filters to list */
+ ixl_init_filters(vsi);
+
+ /* Setup vlan's if needed */
+ ixl_setup_vlan_filters(vsi);
+
+ /* Set up MSI/X routing and the ITR settings */
+ if (pf->enable_msix) {
+ ixl_configure_queue_intr_msix(pf);
+ ixl_configure_itr(pf);
+ } else
+ ixl_configure_legacy(pf);
+
+ ixl_enable_rings(vsi);
+
+ i40e_aq_set_default_vsi(hw, vsi->seid, NULL);
+
+ ixl_reconfigure_filters(vsi);
+
+ /* And now turn on interrupts */
+ ixl_enable_intr(vsi);
+
+ /* Get link info */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ ixl_update_link_status(pf);
+
+ /* Set initial advertised speed sysctl value */
+ ixl_get_initial_advertised_speeds(pf);
+
+ /* Start the local timer */
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+
+ /* Now inform the stack we're ready */
+ ifp->if_drv_flags |= IFF_DRV_RUNNING;
+}
+
+
+/*********************************************************************
+ *
+ * Get the hardware capabilities
+ *
+ **********************************************************************/
+
+int
+ixl_get_hw_capabilities(struct ixl_pf *pf)
+{
+ struct i40e_aqc_list_capabilities_element_resp *buf;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int error, len;
+ u16 needed;
+ bool again = TRUE;
+
+ len = 40 * sizeof(struct i40e_aqc_list_capabilities_element_resp);
+retry:
+ if (!(buf = (struct i40e_aqc_list_capabilities_element_resp *)
+ malloc(len, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate cap memory\n");
+ return (ENOMEM);
+ }
+
+ /* This populates the hw struct */
+ error = i40e_aq_discover_capabilities(hw, buf, len,
+ &needed, i40e_aqc_opc_list_func_capabilities, NULL);
+ free(buf, M_DEVBUF);
+ if ((pf->hw.aq.asq_last_status == I40E_AQ_RC_ENOMEM) &&
+ (again == TRUE)) {
+ /* retry once with a larger buffer */
+ again = FALSE;
+ len = needed;
+ goto retry;
+ } else if (pf->hw.aq.asq_last_status != I40E_AQ_RC_OK) {
+ device_printf(dev, "capability discovery failed: %d\n",
+ pf->hw.aq.asq_last_status);
+ return (ENODEV);
+ }
+
+ /* Capture this PF's starting queue pair */
+ pf->qbase = hw->func_caps.base_queue;
+
+#ifdef IXL_DEBUG
+ device_printf(dev, "pf_id=%d, num_vfs=%d, msix_pf=%d, "
+ "msix_vf=%d, fd_g=%d, fd_b=%d, tx_qp=%d rx_qp=%d qbase=%d\n",
+ hw->pf_id, hw->func_caps.num_vfs,
+ hw->func_caps.num_msix_vectors,
+ hw->func_caps.num_msix_vectors_vf,
+ hw->func_caps.fd_filters_guaranteed,
+ hw->func_caps.fd_filters_best_effort,
+ hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp,
+ hw->func_caps.base_queue);
+#endif
+ /* Print a subset of the capability information. */
+ device_printf(dev, "PF-ID[%d]: VFs %d, MSIX %d, VF MSIX %d, QPs %d, %s\n",
+ hw->pf_id, hw->func_caps.num_vfs, hw->func_caps.num_msix_vectors,
+ hw->func_caps.num_msix_vectors_vf, hw->func_caps.num_tx_qp,
+ (hw->func_caps.mdio_port_mode == 2) ? "I2C" :
+ (hw->func_caps.mdio_port_mode == 1) ? "MDIO dedicated" :
+ "MDIO shared");
+
+ return (error);
+}
+
+void
+ixl_cap_txcsum_tso(struct ixl_vsi *vsi, struct ifnet *ifp, int mask)
+{
+ device_t dev = vsi->dev;
+
+ /* Enable/disable TXCSUM/TSO4 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ ifp->if_capenable |= IFCAP_TXCSUM;
+ /* enable TXCSUM, restore TSO if previously enabled */
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO4) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable |= IFCAP_TSO4;
+ }
+ }
+ else if (mask & IFCAP_TSO4) {
+ ifp->if_capenable |= (IFCAP_TXCSUM | IFCAP_TSO4);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO4;
+ device_printf(dev,
+ "TSO4 requires txcsum, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && !(ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM)
+ ifp->if_capenable &= ~IFCAP_TXCSUM;
+ else if (mask & IFCAP_TSO4)
+ ifp->if_capenable |= IFCAP_TSO4;
+ } else if((ifp->if_capenable & IFCAP_TXCSUM)
+ && (ifp->if_capenable & IFCAP_TSO4)) {
+ if (mask & IFCAP_TXCSUM) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO4;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM | IFCAP_TSO4);
+ device_printf(dev,
+ "TSO4 requires txcsum, disabling both...\n");
+ } else if (mask & IFCAP_TSO4)
+ ifp->if_capenable &= ~IFCAP_TSO4;
+ }
+
+ /* Enable/disable TXCSUM_IPV6/TSO6 */
+ if (!(ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ ifp->if_capenable |= IFCAP_TXCSUM_IPV6;
+ if (vsi->flags & IXL_FLAGS_KEEP_TSO6) {
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable |= IFCAP_TSO6;
+ }
+ } else if (mask & IFCAP_TSO6) {
+ ifp->if_capenable |= (IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ vsi->flags &= ~IXL_FLAGS_KEEP_TSO6;
+ device_printf(dev,
+ "TSO6 requires txcsum6, enabling both...\n");
+ }
+ } else if((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && !(ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6)
+ ifp->if_capenable &= ~IFCAP_TXCSUM_IPV6;
+ else if (mask & IFCAP_TSO6)
+ ifp->if_capenable |= IFCAP_TSO6;
+ } else if ((ifp->if_capenable & IFCAP_TXCSUM_IPV6)
+ && (ifp->if_capenable & IFCAP_TSO6)) {
+ if (mask & IFCAP_TXCSUM_IPV6) {
+ vsi->flags |= IXL_FLAGS_KEEP_TSO6;
+ ifp->if_capenable &= ~(IFCAP_TXCSUM_IPV6 | IFCAP_TSO6);
+ device_printf(dev,
+ "TSO6 requires txcsum6, disabling both...\n");
+ } else if (mask & IFCAP_TSO6)
+ ifp->if_capenable &= ~IFCAP_TSO6;
+ }
+}
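+
+/*
+** Summary of the dependency enforced above for both the v4 and v6
+** pairs: enabling TSO also enables TXCSUM (TSO requires it), and
+** disabling TXCSUM while TSO is enabled disables both, with the
+** IXL_FLAGS_KEEP_TSO* flags remembering that TSO should be restored
+** when TXCSUM is turned back on.
+*/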
+
+/* For the set_advertise sysctl */
+void
+ixl_get_initial_advertised_speeds(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+
+ /* Set initial sysctl values */
+ status = i40e_aq_get_phy_capabilities(hw, FALSE, false, &abilities,
+ NULL);
+ if (status) {
+ /* Non-fatal error */
+ device_printf(dev, "%s: i40e_aq_get_phy_capabilities() error %d\n",
+ __func__, status);
+ return;
+ }
+
+ if (abilities.link_speed & I40E_LINK_SPEED_40GB)
+ pf->advertised_speed |= 0x10;
+ if (abilities.link_speed & I40E_LINK_SPEED_20GB)
+ pf->advertised_speed |= 0x8;
+ if (abilities.link_speed & I40E_LINK_SPEED_10GB)
+ pf->advertised_speed |= 0x4;
+ if (abilities.link_speed & I40E_LINK_SPEED_1GB)
+ pf->advertised_speed |= 0x2;
+ if (abilities.link_speed & I40E_LINK_SPEED_100MB)
+ pf->advertised_speed |= 0x1;
+}
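+
+/*
+** The advertised_speed bits set above map to link speeds as follows:
+** 0x1 = 100 Mb, 0x2 = 1 Gb, 0x4 = 10 Gb, 0x8 = 20 Gb, 0x10 = 40 Gb.
+*/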
+
+int
+ixl_teardown_hw_structs(struct ixl_pf *pf)
+{
+ enum i40e_status_code status = 0;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+
+ /* Shutdown LAN HMC */
+ if (hw->hmc.hmc_obj) {
+ status = i40e_shutdown_lan_hmc(hw);
+ if (status) {
+ device_printf(dev,
+ "init: LAN HMC shutdown failure; status %d\n", status);
+ goto err_out;
+ }
+ }
+
+ // XXX: This gets called when we know the adminq is inactive;
+ // so we already know it's set up when we get here.
+
+ /* Shutdown admin queue */
+ status = i40e_shutdown_adminq(hw);
+ if (status)
+ device_printf(dev,
+ "init: Admin Queue shutdown failure; status %d\n", status);
+
+err_out:
+ return (status);
+}
+
+int
+ixl_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u8 set_fc_err_mask;
+ int error = 0;
+
+ // XXX: clear_hw() actually writes to hw registers -- maybe this isn't necessary
+ i40e_clear_hw(hw);
+ error = i40e_pf_reset(hw);
+ if (error) {
+ device_printf(dev, "init: PF reset failure");
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_init_adminq(hw);
+ if (error) {
+ device_printf(dev, "init: Admin queue init failure;"
+ " status code %d", error);
+ error = EIO;
+ goto err_out;
+ }
+
+ i40e_clear_pxe_mode(hw);
+
+ error = ixl_get_hw_capabilities(pf);
+ if (error) {
+ device_printf(dev, "init: Error retrieving HW capabilities;"
+ " status code %d\n", error);
+ goto err_out;
+ }
+
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init: LAN HMC init failed; status code %d\n",
+ error);
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "init: LAN HMC config failed; status code %d\n",
+ error);
+ error = EIO;
+ goto err_out;
+ }
+
+ // XXX: possible fix for panic, but our failure recovery is still broken
+ error = ixl_switch_config(pf);
+ if (error) {
+ device_printf(dev, "init: ixl_switch_config() failed: %d\n",
+ error);
+ goto err_out;
+ }
+
+ error = i40e_aq_set_phy_int_mask(hw, IXL_DEFAULT_PHY_INT_MASK,
+ NULL);
+ if (error) {
+ device_printf(dev, "init: i40e_aq_set_phy_mask() failed: err %d,"
+ " aq_err %d\n", error, hw->aq.asq_last_status);
+ error = EIO;
+ goto err_out;
+ }
+
+ error = i40e_set_fc(hw, &set_fc_err_mask, true);
+ if (error) {
+ device_printf(dev, "init: setting link flow control failed; retcode %d,"
+ " fc_err_mask 0x%02x\n", error, set_fc_err_mask);
+ goto err_out;
+ }
+
+ // XXX: (Rebuild VSIs?)
+
+ /* Firmware delay workaround */
+ if (((hw->aq.fw_maj_ver == 4) && (hw->aq.fw_min_ver < 33)) ||
+ (hw->aq.fw_maj_ver < 4)) {
+ i40e_msec_delay(75);
+ error = i40e_aq_set_link_restart_an(hw, TRUE, NULL);
+ if (error) {
+ device_printf(dev, "init: link restart failed, aq_err %d\n",
+ hw->aq.asq_last_status);
+ goto err_out;
+ }
+ }
+
+
+err_out:
+ return (error);
+}
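+
+/*
+** ixl_reset() above rebuilds hardware state in a fixed order:
+** clear HW -> PF reset -> admin queue init -> capability discovery ->
+** LAN HMC init/config -> switch config -> PHY interrupt mask ->
+** flow control, plus a link-restart workaround for firmware older
+** than 4.33. Any failure aborts the sequence and returns the error.
+*/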
+
+/*
+** MSIX Interrupt Handlers and Tasklets
+*/
+void
+ixl_handle_que(void *context, int pending)
+{
+ struct ixl_queue *que = context;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ struct ifnet *ifp = vsi->ifp;
+ bool more;
+
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ more = ixl_rxeof(que, IXL_RX_LIMIT);
+ IXL_TX_LOCK(txr);
+ ixl_txeof(que);
+ if (!drbr_empty(ifp, txr->br))
+ ixl_mq_start_locked(ifp, txr);
+ IXL_TX_UNLOCK(txr);
+ if (more) {
+ taskqueue_enqueue(que->tq, &que->task);
+ return;
+ }
+ }
+
+ /* Re-enable this interrupt */
+ ixl_enable_queue(hw, que->me);
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * Legacy Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_intr(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ struct ifnet *ifp = vsi->ifp;
+ struct tx_ring *txr = &que->txr;
+ u32 reg, icr0, mask;
+ bool more_tx, more_rx;
+
+ ++que->irqs;
+
+ /* Protect against spurious interrupts */
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING) == 0)
+ return;
+
+ icr0 = rd32(hw, I40E_PFINT_ICR0);
+
+ reg = rd32(hw, I40E_PFINT_DYN_CTL0);
+ reg = reg | I40E_PFINT_DYN_CTL0_CLEARPBA_MASK;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+#ifdef PCI_IOV
+ if (icr0 & I40E_PFINT_ICR0_VFLR_MASK)
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+#endif
+
+ if (icr0 & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ return;
+ }
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ /* re-enable other interrupt causes */
+ wr32(hw, I40E_PFINT_ICR0_ENA, mask);
+
+ /* And now the queues */
+ reg = rd32(hw, I40E_QINT_RQCTL(0));
+ reg |= I40E_QINT_RQCTL_CAUSE_ENA_MASK;
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = rd32(hw, I40E_QINT_TQCTL(0));
+ reg |= I40E_QINT_TQCTL_CAUSE_ENA_MASK;
+ reg &= ~I40E_PFINT_ICR0_INTEVENT_MASK;
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+
+ ixl_enable_legacy(hw);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX VSI Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_que(void *arg)
+{
+ struct ixl_queue *que = arg;
+ struct ixl_vsi *vsi = que->vsi;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ bool more_tx, more_rx;
+
+ /* Protect against spurious interrupts */
+ if (!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING))
+ return;
+
+ ++que->irqs;
+
+ more_rx = ixl_rxeof(que, IXL_RX_LIMIT);
+
+ IXL_TX_LOCK(txr);
+ more_tx = ixl_txeof(que);
+ /*
+ ** Make certain that if the stack
+ ** has anything queued the task gets
+ ** scheduled to handle it.
+ */
+ if (!drbr_empty(vsi->ifp, txr->br))
+ more_tx = 1;
+ IXL_TX_UNLOCK(txr);
+
+ ixl_set_queue_rx_itr(que);
+ ixl_set_queue_tx_itr(que);
+
+ if (more_tx || more_rx)
+ taskqueue_enqueue(que->tq, &que->task);
+ else
+ ixl_enable_queue(hw, que->me);
+
+ return;
+}
+
+
+/*********************************************************************
+ *
+ * MSIX Admin Queue Interrupt Service routine
+ *
+ **********************************************************************/
+void
+ixl_msix_adminq(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ u32 reg, mask, rstat_reg;
+ bool do_task = FALSE;
+
+ ++pf->admin_irq;
+
+ reg = rd32(hw, I40E_PFINT_ICR0);
+ mask = rd32(hw, I40E_PFINT_ICR0_ENA);
+
+ /* Check on the cause */
+ if (reg & I40E_PFINT_ICR0_ADMINQ_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ADMINQ_MASK;
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_MAL_DETECT_MASK) {
+ ixl_handle_mdd_event(pf);
+ mask &= ~I40E_PFINT_ICR0_MAL_DETECT_MASK;
+ }
+
+ if (reg & I40E_PFINT_ICR0_GRST_MASK) {
+ device_printf(dev, "Reset Requested!\n");
+ rstat_reg = rd32(hw, I40E_GLGEN_RSTAT);
+ rstat_reg = (rstat_reg & I40E_GLGEN_RSTAT_RESET_TYPE_MASK)
+ >> I40E_GLGEN_RSTAT_RESET_TYPE_SHIFT;
+ device_printf(dev, "Reset type: ");
+ switch (rstat_reg) {
+ /* These others might be handled similarly to an EMPR reset */
+ case I40E_RESET_CORER:
+ printf("CORER\n");
+ break;
+ case I40E_RESET_GLOBR:
+ printf("GLOBR\n");
+ break;
+ case I40E_RESET_EMPR:
+ printf("EMPR\n");
+ atomic_set_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+ break;
+ default:
+ printf("POR\n");
+ break;
+ }
+ /* overload admin queue task to check reset progress */
+ do_task = TRUE;
+ }
+
+ if (reg & I40E_PFINT_ICR0_ECC_ERR_MASK) {
+ device_printf(dev, "ECC Error detected!\n");
+ }
+
+ if (reg & I40E_PFINT_ICR0_HMC_ERR_MASK) {
+ reg = rd32(hw, I40E_PFHMC_ERRORINFO);
+ if (reg & I40E_PFHMC_ERRORINFO_ERROR_DETECTED_MASK) {
+ device_printf(dev, "HMC Error detected!\n");
+ device_printf(dev, "INFO 0x%08x\n", reg);
+ reg = rd32(hw, I40E_PFHMC_ERRORDATA);
+ device_printf(dev, "DATA 0x%08x\n", reg);
+ wr32(hw, I40E_PFHMC_ERRORINFO, 0);
+ }
+ }
+
+ if (reg & I40E_PFINT_ICR0_PCI_EXCEPTION_MASK) {
+ device_printf(dev, "PCI Exception detected!\n");
+ }
+
+#ifdef PCI_IOV
+ if (reg & I40E_PFINT_ICR0_VFLR_MASK) {
+ mask &= ~I40E_PFINT_ICR0_ENA_VFLR_MASK;
+ taskqueue_enqueue(pf->tq, &pf->vflr_task);
+ }
+#endif
+
+ if (do_task)
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ else
+ ixl_enable_adminq(hw);
+}
+
+void
+ixl_set_promisc(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int err, mcnt = 0;
+ bool uni = FALSE, multi = FALSE;
+
+ if (ifp->if_flags & IFF_ALLMULTI)
+ multi = TRUE;
+ else { /* Need to count the multicast addresses */
+ struct ifmultiaddr *ifma;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ if (mcnt == MAX_MULTICAST_ADDR)
+ break;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ }
+
+ if (mcnt >= MAX_MULTICAST_ADDR)
+ multi = TRUE;
+ if (ifp->if_flags & IFF_PROMISC)
+ uni = TRUE;
+
+ err = i40e_aq_set_vsi_unicast_promiscuous(hw,
+ vsi->seid, uni, NULL, TRUE);
+ err = i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, multi, NULL);
+ return;
+}
+
+/*********************************************************************
+ * Filter Routines
+ *
+ * Routines for multicast and vlan filter management.
+ *
+ *********************************************************************/
+void
+ixl_add_multi(struct ixl_vsi *vsi)
+{
+ struct ifmultiaddr *ifma;
+ struct ifnet *ifp = vsi->ifp;
+ struct i40e_hw *hw = vsi->hw;
+ int mcnt = 0, flags;
+
+ IOCTL_DEBUGOUT("ixl_add_multi: begin");
+
+ if_maddr_rlock(ifp);
+ /*
+ ** First just get a count, to decide if
+ ** we should simply use multicast promiscuous.
+ */
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+
+ if (__predict_false(mcnt >= MAX_MULTICAST_ADDR)) {
+ /* delete existing MC filters */
+ ixl_del_hw_filters(vsi, mcnt);
+ i40e_aq_set_vsi_multicast_promiscuous(hw,
+ vsi->seid, TRUE, NULL);
+ return;
+ }
+
+ mcnt = 0;
+ if_maddr_rlock(ifp);
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ ixl_add_mc_filter(vsi,
+ (u8*)LLADDR((struct sockaddr_dl *) ifma->ifma_addr));
+ mcnt++;
+ }
+ if_maddr_runlock(ifp);
+ if (mcnt > 0) {
+ flags = (IXL_FILTER_ADD | IXL_FILTER_USED | IXL_FILTER_MC);
+ ixl_add_hw_filters(vsi, flags, mcnt);
+ }
+
+ IOCTL_DEBUGOUT("ixl_add_multi: end");
+ return;
+}
+
+void
+ixl_del_multi(struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp = vsi->ifp;
+ struct ifmultiaddr *ifma;
+ struct ixl_mac_filter *f;
+ int mcnt = 0;
+ bool match = FALSE;
+
+ IOCTL_DEBUGOUT("ixl_del_multi: begin");
+
+ /* Search for removed multicast addresses */
+ if_maddr_rlock(ifp);
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if ((f->flags & IXL_FILTER_USED) && (f->flags & IXL_FILTER_MC)) {
+ match = FALSE;
+ TAILQ_FOREACH(ifma, &ifp->if_multiaddrs, ifma_link) {
+ if (ifma->ifma_addr->sa_family != AF_LINK)
+ continue;
+ u8 *mc_addr = (u8 *)LLADDR((struct sockaddr_dl *)ifma->ifma_addr);
+ if (cmp_etheraddr(f->macaddr, mc_addr)) {
+ match = TRUE;
+ break;
+ }
+ }
+ if (match == FALSE) {
+ f->flags |= IXL_FILTER_DEL;
+ mcnt++;
+ }
+ }
+ }
+ if_maddr_runlock(ifp);
+
+ if (mcnt > 0)
+ ixl_del_hw_filters(vsi, mcnt);
+}
+
+
+/*********************************************************************
+ * Timer routine
+ *
+ * This routine checks for link status, updates statistics,
+ * and runs the watchdog check.
+ *
+ * Only runs when the driver is configured UP and RUNNING.
+ *
+ **********************************************************************/
+
+void
+ixl_local_timer(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = pf->dev;
+ int hung = 0;
+ u32 mask;
+
+ mtx_assert(&pf->pf_mtx, MA_OWNED);
+
+ /* Fire off the adminq task */
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+
+ /* Update stats */
+ ixl_update_stats_counters(pf);
+
+ /* Check status of the queues */
+ mask = (I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_SWINT_TRIG_MASK);
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ /* Any queues with outstanding work get a sw irq */
+ if (que->busy)
+ wr32(hw, I40E_PFINT_DYN_CTLN(que->me), mask);
+ /*
+ ** Each time txeof runs without cleaning while there
+ ** are still uncleaned descriptors, it increments busy;
+ ** once busy reaches IXL_MAX_TX_BUSY the queue is declared hung.
+ */
+ if (que->busy == IXL_QUEUE_HUNG) {
+ ++hung;
+ continue;
+ }
+ if (que->busy >= IXL_MAX_TX_BUSY) {
+#ifdef IXL_DEBUG
+ device_printf(dev, "Warning queue %d "
+ "appears to be hung!\n", i);
+#endif
+ que->busy = IXL_QUEUE_HUNG;
+ ++hung;
+ }
+ }
+ /* Only reinit if all queues show hung */
+ if (hung == vsi->num_queues)
+ goto hung;
+
+ callout_reset(&pf->timer, hz, ixl_local_timer, pf);
+ return;
+
+hung:
+ device_printf(dev, "Local Timer: HANG DETECT - Resetting!!\n");
+ ixl_init_locked(pf);
+}
+
+/*
+** Note: this routine updates the OS on the link state;
+** the real check of the hardware only happens with
+** a link interrupt.
+*/
+void
+ixl_update_link_status(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct i40e_hw *hw = &pf->hw;
+ struct ifnet *ifp = vsi->ifp;
+ device_t dev = pf->dev;
+
+ if (pf->link_up) {
+ if (vsi->link_active == FALSE) {
+ pf->fc = hw->fc.current_mode;
+ if (bootverbose) {
+ device_printf(dev, "Link is up %d Gbps %s,"
+ " Flow Control: %s\n",
+ ((pf->link_speed ==
+ I40E_LINK_SPEED_40GB)? 40:10),
+ "Full Duplex", ixl_fc_string[pf->fc]);
+ }
+ vsi->link_active = TRUE;
+ if_link_state_change(ifp, LINK_STATE_UP);
+ }
+ } else { /* Link down */
+ if (vsi->link_active == TRUE) {
+ if (bootverbose)
+ device_printf(dev, "Link is Down\n");
+ if_link_state_change(ifp, LINK_STATE_DOWN);
+ vsi->link_active = FALSE;
+ }
+ }
+
+ return;
+}
+
+/*********************************************************************
+ *
+ * This routine disables all traffic on the adapter by issuing a
+ * global reset on the MAC and deallocates TX/RX buffers.
+ *
+ **********************************************************************/
+
+void
+ixl_stop_locked(struct ixl_pf *pf)
+{
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ifnet *ifp = vsi->ifp;
+
+ INIT_DEBUGOUT("ixl_stop: begin\n");
+
+ IXL_PF_LOCK_ASSERT(pf);
+
+ /* Stop the local timer */
+ callout_stop(&pf->timer);
+
+ ixl_disable_rings_intr(vsi);
+ ixl_disable_rings(vsi);
+
+ /* Tell the stack that the interface is no longer active */
+ ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
+}
+
+void
+ixl_stop(struct ixl_pf *pf)
+{
+ IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_free_queue_tqs(&pf->vsi);
+}
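+
+/*
+** Note the split above: ixl_stop_locked() performs only the work that
+** must happen under the PF lock, while ixl_stop() tears down the
+** queue interrupts and taskqueues after dropping it, since draining
+** a taskqueue can sleep and must not be done while holding a mutex.
+*/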
+
+/*********************************************************************
+ *
+ * Setup MSIX Interrupt resources and handlers for the VSI
+ *
+ **********************************************************************/
+int
+ixl_assign_vsi_legacy(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *que = vsi->queues;
+ int error, rid = 0;
+
+ if (pf->msix == 1)
+ rid = 1;
+ pf->res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
+ &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (pf->res == NULL) {
+ device_printf(dev, "Unable to allocate"
+ " bus resource: vsi legacy/msi interrupt\n");
+ return (ENXIO);
+ }
+
+ /* Set the handler function */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_intr, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "Failed to register legacy/msi handler\n");
+ return (error);
+ }
+ bus_describe_intr(dev, pf->res, pf->tag, "irq0");
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+ taskqueue_start_threads(&que->tq, 1, PI_NET, "%s que",
+ device_get_nameunit(dev));
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+
+ pf->tq = taskqueue_create_fast("ixl_adm", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s adminq",
+ device_get_nameunit(dev));
+
+ return (0);
+}
+
+int
+ixl_setup_adminq_tq(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int error = 0;
+
+ /* Tasklet for Admin Queue interrupts */
+ TASK_INIT(&pf->adminq, 0, ixl_do_adminq, pf);
+#ifdef PCI_IOV
+ /* VFLR Tasklet */
+ TASK_INIT(&pf->vflr_task, 0, ixl_handle_vflr, pf);
+#endif
+ /* Create and start Admin Queue taskqueue */
+ pf->tq = taskqueue_create_fast("ixl_aq", M_NOWAIT,
+ taskqueue_thread_enqueue, &pf->tq);
+ if (!pf->tq) {
+ device_printf(dev, "taskqueue_create_fast (for AQ) returned NULL!\n");
+ return (ENOMEM);
+ }
+ error = taskqueue_start_threads(&pf->tq, 1, PI_NET, "%s aq",
+ device_get_nameunit(dev));
+ if (error) {
+ device_printf(dev, "taskqueue_start_threads (for AQ) error: %d\n",
+ error);
+ taskqueue_free(pf->tq);
+ return (error);
+ }
+ return (0);
+}
+
+int
+ixl_setup_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+#ifdef RSS
+ int cpu_id = 0;
+ cpuset_t cpu_mask;
+#endif
+
+ /* Create queue tasks and start queue taskqueues */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ TASK_INIT(&que->tx_task, 0, ixl_deferred_mq_start, que);
+ TASK_INIT(&que->task, 0, ixl_handle_que, que);
+ que->tq = taskqueue_create_fast("ixl_que", M_NOWAIT,
+ taskqueue_thread_enqueue, &que->tq);
+#ifdef RSS
+ CPU_SETOF(cpu_id, &cpu_mask);
+ taskqueue_start_threads_cpuset(&que->tq, 1, PI_NET,
+ &cpu_mask, "%s (bucket %d)",
+ device_get_nameunit(dev), cpu_id);
+#else
+ taskqueue_start_threads(&que->tq, 1, PI_NET,
+ "%s (que %d)", device_get_nameunit(dev), que->me);
+#endif
+ }
+
+ return (0);
+}
+
+void
+ixl_free_adminq_tq(struct ixl_pf *pf)
+{
+ if (pf->tq) {
+ taskqueue_free(pf->tq);
+ pf->tq = NULL;
+ }
+}
+
+void
+ixl_free_queue_tqs(struct ixl_vsi *vsi)
+{
+ struct ixl_queue *que = vsi->queues;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ if (que->tq) {
+ taskqueue_free(que->tq);
+ que->tq = NULL;
+ }
+ }
+}
+
+int
+ixl_setup_adminq_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int rid, error = 0;
+
+ /* Admin IRQ rid is 1, vector is 0 */
+ rid = 1;
+ /* Get interrupt resource from bus */
+ pf->res = bus_alloc_resource_any(dev,
+ SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE);
+ if (!pf->res) {
+ device_printf(dev, "bus_alloc_resource_any() for Admin Queue"
+ " interrupt failed [rid=%d]\n", rid);
+ return (ENXIO);
+ }
+ /* Then associate interrupt with handler */
+ error = bus_setup_intr(dev, pf->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_adminq, pf, &pf->tag);
+ if (error) {
+ pf->res = NULL;
+ device_printf(dev, "bus_setup_intr() for Admin Queue"
+ " interrupt handler failed, error %d\n", error);
+ return (ENXIO);
+ }
+ error = bus_describe_intr(dev, pf->res, pf->tag, "aq");
+ if (error) {
+ /* Probably non-fatal? */
+ device_printf(dev, "bus_describe_intr() for Admin Queue"
+ " interrupt name failed, error %d\n", error);
+ }
+ pf->admvec = 0;
+
+ return (0);
+}
+
+/*
+ * Allocate interrupt resources from the bus and associate an
+ * interrupt handler with each of the VSI's queues.
+ */
+int
+ixl_setup_queue_msix(struct ixl_vsi *vsi)
+{
+ device_t dev = vsi->dev;
+ struct ixl_queue *que = vsi->queues;
+ struct tx_ring *txr;
+ int error, rid, vector = 1;
+
+ /* Queue interrupt vector numbers start at 1 (adminq intr is 0) */
+ for (int i = 0; i < vsi->num_queues; i++, vector++, que++) {
+ int cpu_id = i;
+ rid = vector + 1;
+ txr = &que->txr;
+ que->res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (!que->res) {
+ device_printf(dev, "bus_alloc_resource_any() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
+ return (ENXIO);
+ }
+ /* Set the handler function */
+ error = bus_setup_intr(dev, que->res,
+ INTR_TYPE_NET | INTR_MPSAFE, NULL,
+ ixl_msix_que, que, &que->tag);
+ if (error) {
+ device_printf(dev, "bus_setup_intr() for Queue %d"
+ " interrupt handler failed, error %d\n",
+ que->me, error);
+ return (error);
+ }
+ error = bus_describe_intr(dev, que->res, que->tag, "q%d", i);
+ if (error) {
+ device_printf(dev, "bus_describe_intr() for Queue %d"
+ " interrupt name failed, error %d\n",
+ que->me, error);
+ }
+ /* Bind the vector to a CPU */
+#ifdef RSS
+ cpu_id = rss_getcpu(i % rss_getnumbuckets());
+#endif
+ error = bus_bind_intr(dev, que->res, cpu_id);
+ if (error) {
+ device_printf(dev, "bus_bind_intr() for Queue %d"
+ " to CPU %d failed, error %d\n",
+ que->me, cpu_id, error);
+ }
+ que->msix = vector;
+ }
+
+ return (0);
+}
+
+/*
+ * When used in a virtualized environment, the PCI BUSMASTER capability may
+ * not be set, so explicitly set it here and rewrite the ENABLE bit in the
+ * MSIX control register so the host can initialize us successfully.
+ */
+void
+ixl_set_busmaster(device_t dev)
+{
+ u16 pci_cmd_word;
+ int msix_ctrl, rid;
+
+ pci_cmd_word = pci_read_config(dev, PCIR_COMMAND, 2);
+ pci_cmd_word |= PCIM_CMD_BUSMASTEREN;
+ pci_write_config(dev, PCIR_COMMAND, pci_cmd_word, 2);
+
+ pci_find_cap(dev, PCIY_MSIX, &rid);
+ rid += PCIR_MSIX_CTRL;
+ msix_ctrl = pci_read_config(dev, rid, 2);
+ msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
+ pci_write_config(dev, rid, msix_ctrl, 2);
+}
+
+/*
+ * Allocate MSI/X vectors from the OS.
+ * Returns 0 for legacy, 1 for MSI, >1 for MSIX.
+ */
+int
+ixl_init_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ int auto_max_queues;
+ int rid, want, vectors, queues, available;
+
+ /* Override by tuneable */
+ if (!pf->enable_msix)
+ goto no_msix;
+
+ /* Ensure proper operation in virtualized environment */
+ ixl_set_busmaster(dev);
+
+ /* First try MSI/X */
+ rid = PCIR_BAR(IXL_BAR);
+ pf->msix_mem = bus_alloc_resource_any(dev,
+ SYS_RES_MEMORY, &rid, RF_ACTIVE);
+ if (!pf->msix_mem) {
+ /* May not be enabled */
+ device_printf(pf->dev,
+ "Unable to map MSIX table\n");
+ goto no_msix;
+ }
+
+ available = pci_msix_count(dev);
+ if (available < 2) {
+ /* system has msix disabled (0), or only one vector (1) */
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ rid, pf->msix_mem);
+ pf->msix_mem = NULL;
+ goto no_msix;
+ }
+
+ /* Clamp max number of queues based on:
+ * - # of MSI-X vectors available
+ * - # of cpus available
+ * - # of queues that can be assigned to the LAN VSI
+ */
+ auto_max_queues = min(mp_ncpus, available - 1);
+ if (hw->mac.type == I40E_MAC_X722)
+ auto_max_queues = min(auto_max_queues, 128);
+ else
+ auto_max_queues = min(auto_max_queues, 64);
+
+ /* Override with tunable value if tunable is less than autoconfig count */
+ if ((pf->max_queues != 0) && (pf->max_queues <= auto_max_queues))
+ queues = pf->max_queues;
+ /* Use autoconfig amount if that's lower */
+ else if ((pf->max_queues != 0) && (pf->max_queues > auto_max_queues)) {
+ device_printf(dev, "ixl_max_queues (%d) is too large, using "
+ "autoconfig amount (%d)...\n",
+ pf->max_queues, auto_max_queues);
+ queues = auto_max_queues;
+ }
+ /* Limit maximum auto-configured queues to 8 if no user value is set */
+ else
+ queues = min(auto_max_queues, 8);
+
+#ifdef RSS
+ /* If we're doing RSS, clamp at the number of RSS buckets */
+ if (queues > rss_getnumbuckets())
+ queues = rss_getnumbuckets();
+#endif
+
+ /*
+ ** Want one vector (RX/TX pair) per queue
+ ** plus an additional vector for the admin queue.
+ */
+ want = queues + 1;
+ if (want <= available) /* Have enough */
+ vectors = want;
+ else {
+ device_printf(pf->dev,
+ "MSIX Configuration Problem, "
+ "%d vectors available but %d wanted!\n",
+ available, want);
+ return (0); /* Will go to Legacy setup */
+ }
+
+ if (pci_alloc_msix(dev, &vectors) == 0) {
+ device_printf(pf->dev,
+ "Using MSIX interrupts with %d vectors\n", vectors);
+ pf->msix = vectors;
+ pf->vsi.num_queues = queues;
+#ifdef RSS
+ /*
+ * If we're doing RSS, the number of queues needs to
+ * match the number of RSS buckets that are configured.
+ *
+ * + If there's more queues than RSS buckets, we'll end
+ * up with queues that get no traffic.
+ *
+ * + If there's more RSS buckets than queues, we'll end
+ * up having multiple RSS buckets map to the same queue,
+ * so there'll be some contention.
+ */
+ if (queues != rss_getnumbuckets()) {
+ device_printf(dev,
+ "%s: queues (%d) != RSS buckets (%d)"
+ "; performance will be impacted.\n",
+ __func__, queues, rss_getnumbuckets());
+ }
+#endif
+ return (vectors);
+ }
+no_msix:
+ vectors = pci_msi_count(dev);
+ pf->vsi.num_queues = 1;
+ pf->max_queues = 1;
+ pf->enable_msix = 0;
+ if (vectors == 1 && pci_alloc_msi(dev, &vectors) == 0)
+ device_printf(pf->dev, "Using an MSI interrupt\n");
+ else {
+ vectors = 0;
+ device_printf(pf->dev, "Using a Legacy interrupt\n");
+ }
+ return (vectors);
+}
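+
+/*
+** Worked example (illustrative): on an 8-CPU system where the device
+** exposes 16 MSI-X vectors and no tunables are set,
+** auto_max_queues = min(8, 16 - 1) = 8, queues = min(8, 8) = 8, and
+** want = 8 + 1 = 9 (one vector per queue pair plus the admin queue),
+** so 9 vectors are requested and 9 is returned on success.
+*/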
+
+/*
+ * Configure admin queue/misc interrupt cause registers in hardware.
+ */
+void
+ixl_configure_intr0_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ /* First set up the adminq - vector 0 */
+ wr32(hw, I40E_PFINT_ICR0_ENA, 0); /* disable all */
+ rd32(hw, I40E_PFINT_ICR0); /* read to clear */
+
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_GRST_MASK |
+ I40E_PFINT_ICR0_ENA_HMC_ERR_MASK |
+ I40E_PFINT_ICR0_ENA_ADMINQ_MASK |
+ I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK |
+ I40E_PFINT_ICR0_ENA_VFLR_MASK |
+ I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /*
+ * 0x7FF is the end of the queue list.
+ * This means we won't use MSI-X vector 0 for a queue interrupt
+ * in MSIX mode.
+ */
+ wr32(hw, I40E_PFINT_LNKLST0, 0x7FF);
+ /* Value is in 2 usec units, so 0x3E is 62*2 = 124 usecs. */
+ wr32(hw, I40E_PFINT_ITR0(IXL_RX_ITR), 0x3E);
+
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTL0_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTL0_INTENA_MSK_MASK);
+
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+}
+
+/*
+ * Configure queue interrupt cause registers in hardware.
+ */
+void
+ixl_configure_queue_intr_msix(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ u32 reg;
+ u16 vector = 1;
+
+ for (int i = 0; i < vsi->num_queues; i++, vector++) {
+ wr32(hw, I40E_PFINT_DYN_CTLN(i), 0);
+ /* First queue type is RX / 0 */
+ wr32(hw, I40E_PFINT_LNKLSTN(i), i);
+
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK |
+ (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_RQCTL_MSIX_INDX_SHIFT) |
+ (i << I40E_QINT_RQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_TX << I40E_QINT_RQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(i), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK |
+ (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) |
+ (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) |
+ (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) |
+ (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(i), reg);
+ }
+}
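+
+/*
+** The registers programmed above chain each queue pair into a short
+** cause list for its vector: PFINT_LNKLSTN(i) points at RX queue i,
+** whose RQCTL next-queue field points at TX queue i, whose TQCTL
+** next-queue field is IXL_QUEUE_EOL, terminating the list.
+*/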
+
+/*
+ * Configure for MSI single vector operation
+ */
+void
+ixl_configure_legacy(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u32 reg;
+
+ wr32(hw, I40E_PFINT_ITR0(0), 0);
+ wr32(hw, I40E_PFINT_ITR0(1), 0);
+
+ /* Setup "other" causes */
+ reg = I40E_PFINT_ICR0_ENA_ECC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK
+ | I40E_PFINT_ICR0_ENA_GRST_MASK
+ | I40E_PFINT_ICR0_ENA_PCI_EXCEPTION_MASK
+ | I40E_PFINT_ICR0_ENA_GPIO_MASK
+ | I40E_PFINT_ICR0_ENA_LINK_STAT_CHANGE_MASK
+ | I40E_PFINT_ICR0_ENA_HMC_ERR_MASK
+ | I40E_PFINT_ICR0_ENA_PE_CRITERR_MASK
+ | I40E_PFINT_ICR0_ENA_VFLR_MASK
+ | I40E_PFINT_ICR0_ENA_ADMINQ_MASK
+ ;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+
+ /* SW_ITR_IDX = 0, but don't change INTENA */
+ wr32(hw, I40E_PFINT_DYN_CTL0,
+ I40E_PFINT_DYN_CTLN_SW_ITR_INDX_MASK |
+ I40E_PFINT_DYN_CTLN_INTENA_MSK_MASK);
+ /* SW_ITR_IDX = 0, OTHER_ITR_IDX = 0 */
+ wr32(hw, I40E_PFINT_STAT_CTL0, 0);
+
+ /* FIRSTQ_INDX = 0, FIRSTQ_TYPE = 0 (rx) */
+ wr32(hw, I40E_PFINT_LNKLST0, 0);
+
+ /* Associate the queue pair to the vector and enable the q int */
+ reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK
+ | (IXL_RX_ITR << I40E_QINT_RQCTL_ITR_INDX_SHIFT)
+ | (I40E_QUEUE_TYPE_TX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT);
+ wr32(hw, I40E_QINT_RQCTL(0), reg);
+
+ reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK
+ | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT)
+ | (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT);
+ wr32(hw, I40E_QINT_TQCTL(0), reg);
+}
+
+int
+ixl_allocate_pci_resources(struct ixl_pf *pf)
+{
+ int rid;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+
+ /* Map BAR0 */
+ rid = PCIR_BAR(0);
+ pf->pci_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
+ &rid, RF_ACTIVE);
+
+ if (!(pf->pci_mem)) {
+ device_printf(dev, "Unable to allocate bus resource: PCI memory\n");
+ return (ENXIO);
+ }
+
+ /* Save off the PCI information */
+ hw->vendor_id = pci_get_vendor(dev);
+ hw->device_id = pci_get_device(dev);
+ hw->revision_id = pci_read_config(dev, PCIR_REVID, 1);
+ hw->subsystem_vendor_id =
+ pci_read_config(dev, PCIR_SUBVEND_0, 2);
+ hw->subsystem_device_id =
+ pci_read_config(dev, PCIR_SUBDEV_0, 2);
+
+ hw->bus.device = pci_get_slot(dev);
+ hw->bus.func = pci_get_function(dev);
+
+ /* Save off register access information */
+ pf->osdep.mem_bus_space_tag =
+ rman_get_bustag(pf->pci_mem);
+ pf->osdep.mem_bus_space_handle =
+ rman_get_bushandle(pf->pci_mem);
+ pf->osdep.mem_bus_space_size = rman_get_size(pf->pci_mem);
+ pf->osdep.flush_reg = I40E_GLGEN_STAT;
+ pf->hw.hw_addr = (u8 *) &pf->osdep.mem_bus_space_handle;
+
+ pf->hw.back = &pf->osdep;
+
+ return (0);
+}
+
+/*
+ * Teardown and release the admin queue/misc vector
+ * interrupt.
+ */
+int
+ixl_teardown_adminq_msix(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int rid;
+
+ if (pf->admvec) /* we are doing MSIX */
+ rid = pf->admvec + 1;
+ else
+ rid = (pf->msix != 0) ? 1 : 0;
+
+ if (pf->tag != NULL) {
+ bus_teardown_intr(dev, pf->res, pf->tag);
+ pf->tag = NULL;
+ }
+ if (pf->res != NULL) {
+ bus_release_resource(dev, SYS_RES_IRQ, rid, pf->res);
+ pf->res = NULL;
+ }
+
+ return (0);
+}
+
+int
+ixl_teardown_queue_msix(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ int rid, error = 0;
+
+ /* We may get here before stations are set up */
+ if ((!pf->enable_msix) || (que == NULL))
+ return (0);
+
+ /* Release all MSIX queue resources */
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ rid = que->msix + 1;
+ if (que->tag != NULL) {
+ error = bus_teardown_intr(dev, que->res, que->tag);
+ if (error) {
+ device_printf(dev, "bus_teardown_intr() for"
+ " Queue %d interrupt failed\n",
+ que->me);
+ // return (ENXIO);
+ }
+ que->tag = NULL;
+ }
+ if (que->res != NULL) {
+ error = bus_release_resource(dev, SYS_RES_IRQ, rid, que->res);
+ if (error) {
+ device_printf(dev, "bus_release_resource() for"
+ " Queue %d interrupt failed [rid=%d]\n",
+ que->me, rid);
+ // return (ENXIO);
+ }
+ que->res = NULL;
+ }
+ }
+
+ return (0);
+}
+
+void
+ixl_free_pci_resources(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ int memrid;
+
+ ixl_teardown_queue_msix(&pf->vsi);
+ ixl_teardown_adminq_msix(pf);
+
+ if (pf->msix)
+ pci_release_msi(dev);
+
+ memrid = PCIR_BAR(IXL_BAR);
+
+ if (pf->msix_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ memrid, pf->msix_mem);
+
+ if (pf->pci_mem != NULL)
+ bus_release_resource(dev, SYS_RES_MEMORY,
+ PCIR_BAR(0), pf->pci_mem);
+
+ return;
+}
+
+void
+ixl_add_ifmedia(struct ixl_vsi *vsi, u32 phy_type)
+{
+ /* Display supported media types */
+ if (phy_type & (1 << I40E_PHY_TYPE_100BASE_TX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_100_TX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_T, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_SX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_SX, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_LX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_LX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_XAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_XFI) ||
+ phy_type & (1 << I40E_PHY_TYPE_10GBASE_SFPP_CU))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_SR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_LR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_LR, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_T))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_T, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_CR4_CU) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_AOC) ||
+ phy_type & (1 << I40E_PHY_TYPE_XLAUI) ||
+ phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_CR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_SR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_SR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_LR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_LR4, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_1000BASE_KX))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_1000_KX, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1_CU)
+ || phy_type & (1 << I40E_PHY_TYPE_10GBASE_CR1))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_CR1, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_AOC))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_TWINAX_LONG, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_SFI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_SFI, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KX4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KX4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_10GBASE_KR))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_10G_KR, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_20GBASE_KR2))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_20G_KR2, 0, NULL);
+
+ if (phy_type & (1 << I40E_PHY_TYPE_40GBASE_KR4))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_KR4, 0, NULL);
+ if (phy_type & (1 << I40E_PHY_TYPE_XLPPI))
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_40G_XLPPI, 0, NULL);
+}
+
+/*********************************************************************
+ *
+ * Setup networking device structure and register an interface.
+ *
+ **********************************************************************/
+int
+ixl_setup_interface(device_t dev, struct ixl_vsi *vsi)
+{
+ struct ifnet *ifp;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ enum i40e_status_code aq_error = 0;
+
+ INIT_DEBUGOUT("ixl_setup_interface: begin");
+
+ ifp = vsi->ifp = if_alloc(IFT_ETHER);
+ if (ifp == NULL) {
+ device_printf(dev, "can not allocate ifnet structure\n");
+ return (-1);
+ }
+ if_initname(ifp, device_get_name(dev), device_get_unit(dev));
+ ifp->if_mtu = ETHERMTU;
+ ifp->if_baudrate = IF_Gbps(40);
+ ifp->if_init = ixl_init;
+ ifp->if_softc = vsi;
+ ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
+ ifp->if_ioctl = ixl_ioctl;
+
+#if __FreeBSD_version >= 1100036
+ if_setgetcounterfn(ifp, ixl_get_counter);
+#endif
+
+ ifp->if_transmit = ixl_mq_start;
+
+ ifp->if_qflush = ixl_qflush;
+
+ ifp->if_snd.ifq_maxlen = que->num_desc - 2;
+
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+
+ /* Set TSO limits */
+ ifp->if_hw_tsomax = IP_MAXPACKET - (ETHER_HDR_LEN + ETHER_CRC_LEN);
+ ifp->if_hw_tsomaxsegcount = IXL_MAX_TSO_SEGS;
+ ifp->if_hw_tsomaxsegsize = PAGE_SIZE;
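+ /*
+ ** With IP_MAXPACKET = 65535 and an Ethernet header (14 bytes) plus
+ ** CRC (4 bytes), if_hw_tsomax above works out to 65535 - 18 = 65517
+ ** bytes per TSO burst.
+ */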
+
+ /*
+ * Tell the upper layer(s) we support long frames.
+ */
+ ifp->if_hdrlen = sizeof(struct ether_vlan_header);
+
+ ifp->if_capabilities |= IFCAP_HWCSUM;
+ ifp->if_capabilities |= IFCAP_HWCSUM_IPV6;
+ ifp->if_capabilities |= IFCAP_TSO;
+ ifp->if_capabilities |= IFCAP_JUMBO_MTU;
+ ifp->if_capabilities |= IFCAP_LRO;
+
+ /* VLAN capabilties */
+ ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING
+ | IFCAP_VLAN_HWTSO
+ | IFCAP_VLAN_MTU
+ | IFCAP_VLAN_HWCSUM;
+ ifp->if_capenable = ifp->if_capabilities;
+
+ /*
+ ** Don't enable this by default: if vlans are created on
+ ** another pseudo device (e.g. lagg), vlan events are not
+ ** passed through, breaking operation, but with HW FILTER
+ ** off it works. If using vlans directly on the ixl driver
+ ** you can enable this for full hardware tag filtering.
+ */
+ ifp->if_capabilities |= IFCAP_VLAN_HWFILTER;
+
+ /*
+ * Specify the media types supported by this adapter and register
+ * callbacks to update media and link information
+ */
+ ifmedia_init(&vsi->media, IFM_IMASK, ixl_media_change,
+ ixl_media_status);
+
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, TRUE, &abilities, NULL);
+ /* May need delay to detect fiber correctly */
+ if (aq_error == I40E_ERR_UNKNOWN_PHY) {
+ i40e_msec_delay(200);
+ aq_error = i40e_aq_get_phy_capabilities(hw, FALSE,
+ TRUE, &abilities, NULL);
+ }
+ if (aq_error) {
+ if (aq_error == I40E_ERR_UNKNOWN_PHY)
+ device_printf(dev, "Unknown PHY type detected!\n");
+ else
+ device_printf(dev,
+ "Error getting supported media types, err %d,"
+ " AQ error %d\n", aq_error, hw->aq.asq_last_status);
+ return (0);
+ }
+
+ ixl_add_ifmedia(vsi, abilities.phy_type);
+
+ /* Use autoselect media by default */
+ ifmedia_add(&vsi->media, IFM_ETHER | IFM_AUTO, 0, NULL);
+ ifmedia_set(&vsi->media, IFM_ETHER | IFM_AUTO);
+
+ ether_ifattach(ifp, hw->mac.addr);
+
+ return (0);
+}
+
+/*
+** Run when the Admin Queue gets a link state change interrupt.
+*/
+void
+ixl_link_event(struct ixl_pf *pf, struct i40e_arq_event_info *e)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aqc_get_link_status *status =
+ (struct i40e_aqc_get_link_status *)&e->desc.params.raw;
+
+ /* Request link status from adapter */
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ /* Print out message if an unqualified module is found */
+ if ((status->link_info & I40E_AQ_MEDIA_AVAILABLE) &&
+ (!(status->an_info & I40E_AQ_QUALIFIED_MODULE)) &&
+ (!(status->link_info & I40E_AQ_LINK_UP)))
+ device_printf(dev, "Link failed because "
+ "an unqualified module was detected!\n");
+
+ /* Update OS link info */
+ ixl_update_link_status(pf);
+}
+
+/*********************************************************************
+ *
+ * Get Firmware Switch configuration
+ * - this will need to be more robust when more complex
+ * switch configurations are enabled.
+ *
+ **********************************************************************/
+int
+ixl_switch_config(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = vsi->dev;
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+ int ret;
+ u16 next = 0;
+
+ memset(&aq_buf, 0, sizeof(aq_buf));
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+ ret = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (ret) {
+ device_printf(dev, "aq_get_switch_config() failed, error %d,"
+ " aq_error %d\n", ret, pf->hw.aq.asq_last_status);
+ return (ret);
+ }
+ if (pf->dbg_mask & IXL_DBG_SWITCH_INFO) {
+ device_printf(dev,
+ "Switch config: header reported: %d in structure, %d total\n",
+ sw_config->header.num_reported, sw_config->header.num_total);
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ device_printf(dev,
+ "%d: type=%d seid=%d uplink=%d downlink=%d\n", i,
+ sw_config->element[i].element_type,
+ sw_config->element[i].seid,
+ sw_config->element[i].uplink_seid,
+ sw_config->element[i].downlink_seid);
+ }
+ }
+ /* Simplified due to a single VSI */
+ vsi->uplink_seid = sw_config->element[0].uplink_seid;
+ vsi->downlink_seid = sw_config->element[0].downlink_seid;
+ vsi->seid = sw_config->element[0].seid;
+ return (ret);
+}
+
+/*********************************************************************
+ *
+ * Initialize the VSI: this handles contexts, which means things
+ * like the number of descriptors and buffer size; we also
+ * initialize the rings through this function.
+ *
+ **********************************************************************/
+int
+ixl_initialize_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ struct ixl_queue *que = vsi->queues;
+ device_t dev = vsi->dev;
+ struct i40e_hw *hw = vsi->hw;
+ struct i40e_vsi_context ctxt;
+ int tc_queues;
+ int err = 0;
+
+ memset(&ctxt, 0, sizeof(ctxt));
+ ctxt.seid = vsi->seid;
+ if (pf->veb_seid != 0)
+ ctxt.uplink_seid = pf->veb_seid;
+ ctxt.pf_num = hw->pf_id;
+ err = i40e_aq_get_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_get_vsi_params() failed, error %d"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+ ixl_dbg(pf, IXL_DBG_SWITCH_INFO,
+ "get_vsi_params: seid: %d, uplinkseid: %d, vsi_number: %d, "
+ "vsis_allocated: %d, vsis_unallocated: %d, flags: 0x%x, "
+ "pfnum: %d, vfnum: %d, stat idx: %d, enabled: %d\n", ctxt.seid,
+ ctxt.uplink_seid, ctxt.vsi_number,
+ ctxt.vsis_allocated, ctxt.vsis_unallocated,
+ ctxt.flags, ctxt.pf_num, ctxt.vf_num,
+ ctxt.info.stat_counter_idx, ctxt.info.up_enable_bits);
+ /*
+ ** Set the queue and traffic class bits
+ ** - when multiple traffic classes are supported
+ ** this will need to be more robust.
+ */
+ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID;
+ ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG;
+ /* In contig mode, que_mapping[0] is first queue index used by this VSI */
+ ctxt.info.queue_mapping[0] = 0;
+ /*
+ * This VSI will only use traffic class 0; start traffic class 0's
+ * queue allocation at queue 0, and assign it 2^tc_queues queues (though
+ * the driver may not use all of them).
+ */
+ tc_queues = bsrl(pf->qtag.num_allocated);
+ ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) |
+ ((tc_queues << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT)
+ & I40E_AQ_VSI_TC_QUE_NUMBER_MASK);
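+ /*
+ ** Worked example (illustrative): if pf->qtag.num_allocated is 16,
+ ** bsrl(16) returns 4 (the index of the highest set bit), so the
+ ** TC 0 queue-count field above encodes 2^4 = 16 queues starting
+ ** at offset 0.
+ */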
+
+ /* Set VLAN receive stripping mode */
+ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID;
+ ctxt.info.port_vlan_flags = I40E_AQ_VSI_PVLAN_MODE_ALL;
+ if (vsi->ifp->if_capenable & IFCAP_VLAN_HWTAGGING)
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_STR_BOTH;
+ else
+ ctxt.info.port_vlan_flags |= I40E_AQ_VSI_PVLAN_EMOD_NOTHING;
+
+ /* Save VSI number and info for use later */
+ vsi->vsi_num = ctxt.vsi_number;
+ bcopy(&ctxt.info, &vsi->info, sizeof(vsi->info));
+
+ /* Reset VSI statistics */
+ ixl_vsi_reset_stats(vsi);
+ vsi->hw_filters_add = 0;
+ vsi->hw_filters_del = 0;
+
+ ctxt.flags = htole16(I40E_AQ_VSI_TYPE_PF);
+
+ err = i40e_aq_update_vsi_params(hw, &ctxt, NULL);
+ if (err) {
+ device_printf(dev, "i40e_aq_update_vsi_params() failed, error %d,"
+ " aq_error %d\n", err, hw->aq.asq_last_status);
+ return (err);
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+ struct i40e_hmc_obj_txq tctx;
+ struct i40e_hmc_obj_rxq rctx;
+ u32 txctl;
+ u16 size;
+
+ /* Setup the HMC TX Context */
+ size = que->num_desc * sizeof(struct i40e_tx_desc);
+ memset(&tctx, 0, sizeof(struct i40e_hmc_obj_txq));
+ tctx.new_context = 1;
+ tctx.base = (txr->dma.pa/IXL_TX_CTX_BASE_UNITS);
+ tctx.qlen = que->num_desc;
+ tctx.fc_ena = 0;
+ tctx.rdylist = vsi->info.qs_handle[0]; /* index is TC */
+ /* Enable HEAD writeback */
+ tctx.head_wb_ena = 1;
+ tctx.head_wb_addr = txr->dma.pa +
+ (que->num_desc * sizeof(struct i40e_tx_desc));
+ tctx.rdylist_act = 0;
+ err = i40e_clear_lan_tx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev, "Unable to clear TX context\n");
+ break;
+ }
+ err = i40e_set_lan_tx_queue_context(hw, i, &tctx);
+ if (err) {
+ device_printf(dev, "Unable to set TX context\n");
+ break;
+ }
+ /* Associate the ring with this PF */
+ txctl = I40E_QTX_CTL_PF_QUEUE;
+ txctl |= ((hw->pf_id << I40E_QTX_CTL_PF_INDX_SHIFT) &
+ I40E_QTX_CTL_PF_INDX_MASK);
+ wr32(hw, I40E_QTX_CTL(i), txctl);
+ ixl_flush(hw);
+
+ /* Do ring (re)init */
+ ixl_init_tx_ring(que);
+
+ /* Next setup the HMC RX Context */
+ if (vsi->max_frame_size <= MCLBYTES)
+ rxr->mbuf_sz = MCLBYTES;
+ else
+ rxr->mbuf_sz = MJUMPAGESIZE;
+
+ u16 max_rxmax = rxr->mbuf_sz * hw->func_caps.rx_buf_chain_len;
+
+ /* Set up an RX context for the HMC */
+ memset(&rctx, 0, sizeof(struct i40e_hmc_obj_rxq));
+ rctx.dbuff = rxr->mbuf_sz >> I40E_RXQ_CTX_DBUFF_SHIFT;
+ /* ignore header split for now */
+ rctx.hbuff = 0 >> I40E_RXQ_CTX_HBUFF_SHIFT;
+ rctx.rxmax = (vsi->max_frame_size < max_rxmax) ?
+ vsi->max_frame_size : max_rxmax;
+ rctx.dtype = 0;
+ rctx.dsize = 1; /* do 32byte descriptors */
+ rctx.hsplit_0 = 0; /* no HDR split initially */
+ rctx.base = (rxr->dma.pa/IXL_RX_CTX_BASE_UNITS);
+ rctx.qlen = que->num_desc;
+ rctx.tphrdesc_ena = 1;
+ rctx.tphwdesc_ena = 1;
+ rctx.tphdata_ena = 0;
+ rctx.tphhead_ena = 0;
+ rctx.lrxqthresh = 2;
+ rctx.crcstrip = 1;
+ rctx.l2tsel = 1;
+ rctx.showiv = 1;
+ rctx.fc_ena = 0;
+ rctx.prefena = 1;
+
+ err = i40e_clear_lan_rx_queue_context(hw, i);
+ if (err) {
+ device_printf(dev,
+ "Unable to clear RX context %d\n", i);
+ break;
+ }
+ err = i40e_set_lan_rx_queue_context(hw, i, &rctx);
+ if (err) {
+ device_printf(dev, "Unable to set RX context %d\n", i);
+ break;
+ }
+ err = ixl_init_rx_ring(que);
+ if (err) {
+			device_printf(dev, "Failed in init_rx_ring %d\n", i);
+ break;
+ }
+#ifdef DEV_NETMAP
+ /* preserve queue */
+ if (vsi->ifp->if_capenable & IFCAP_NETMAP) {
+ struct netmap_adapter *na = NA(vsi->ifp);
+ struct netmap_kring *kring = &na->rx_rings[i];
+ int t = na->num_rx_desc - 1 - nm_kr_rxspace(kring);
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), t);
+ } else
+#endif /* DEV_NETMAP */
+ wr32(vsi->hw, I40E_QRX_TAIL(que->me), que->num_desc - 1);
+ }
+ return (err);
+}
+
+
+/*********************************************************************
+ *
+ * Free all VSI structs.
+ *
+ **********************************************************************/
+void
+ixl_free_vsi(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct ixl_queue *que = vsi->queues;
+
+ /* Free station queues */
+ if (!vsi->queues)
+ goto free_filters;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++) {
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+
+ if (!mtx_initialized(&txr->mtx)) /* uninitialized */
+ continue;
+ IXL_TX_LOCK(txr);
+ ixl_free_que_tx(que);
+ if (txr->base)
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+ IXL_TX_UNLOCK(txr);
+ IXL_TX_LOCK_DESTROY(txr);
+
+ if (!mtx_initialized(&rxr->mtx)) /* uninitialized */
+ continue;
+ IXL_RX_LOCK(rxr);
+ ixl_free_que_rx(que);
+ if (rxr->base)
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+ IXL_RX_UNLOCK(rxr);
+ IXL_RX_LOCK_DESTROY(rxr);
+ }
+ free(vsi->queues, M_DEVBUF);
+
+free_filters:
+ /* Free VSI filter list */
+ ixl_free_mac_filters(vsi);
+}
+
+void
+ixl_free_mac_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+
+ while (!SLIST_EMPTY(&vsi->ftl)) {
+ f = SLIST_FIRST(&vsi->ftl);
+ SLIST_REMOVE_HEAD(&vsi->ftl, next);
+ free(f, M_DEVBUF);
+ }
+}
+
+/*
+ * Fill out fields in queue struct and setup tx/rx memory and structs
+ */
+static int
+ixl_setup_queue(struct ixl_queue *que, struct ixl_pf *pf, int index)
+{
+ device_t dev = pf->dev;
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct tx_ring *txr = &que->txr;
+ struct rx_ring *rxr = &que->rxr;
+ int error = 0;
+ int rsize, tsize;
+
+ /* ERJ: A lot of references to external objects... */
+ que->num_desc = pf->ringsz;
+ que->me = index;
+ que->vsi = vsi;
+
+ txr->que = que;
+ txr->tail = I40E_QTX_TAIL(que->me);
+
+ /* Initialize the TX lock */
+ snprintf(txr->mtx_name, sizeof(txr->mtx_name), "%s:tx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&txr->mtx, txr->mtx_name, NULL, MTX_DEF);
+ /* Create the TX descriptor ring */
+ tsize = roundup2((que->num_desc *
+ sizeof(struct i40e_tx_desc)) +
+ sizeof(u32), DBA_ALIGN);
+ if (i40e_allocate_dma_mem(hw,
+ &txr->dma, i40e_mem_reserved, tsize, DBA_ALIGN)) {
+ device_printf(dev,
+ "Unable to allocate TX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ txr->base = (struct i40e_tx_desc *)txr->dma.va;
+ bzero((void *)txr->base, tsize);
+ /* Now allocate transmit soft structs for the ring */
+ if (ixl_allocate_tx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up TX structures\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ /* Allocate a buf ring */
+ txr->br = buf_ring_alloc(DEFAULT_TXBRSZ, M_DEVBUF,
+ M_NOWAIT, &txr->mtx);
+ if (txr->br == NULL) {
+ device_printf(dev,
+ "Critical Failure setting up TX buf ring\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ rsize = roundup2(que->num_desc *
+ sizeof(union i40e_rx_desc), DBA_ALIGN);
+ rxr->que = que;
+ rxr->tail = I40E_QRX_TAIL(que->me);
+
+ /* Initialize the RX side lock */
+ snprintf(rxr->mtx_name, sizeof(rxr->mtx_name), "%s:rx(%d)",
+ device_get_nameunit(dev), que->me);
+ mtx_init(&rxr->mtx, rxr->mtx_name, NULL, MTX_DEF);
+
+ if (i40e_allocate_dma_mem(hw,
+ &rxr->dma, i40e_mem_reserved, rsize, 4096)) {
+ device_printf(dev,
+ "Unable to allocate RX Descriptor memory\n");
+ error = ENOMEM;
+ goto fail;
+ }
+ rxr->base = (union i40e_rx_desc *)rxr->dma.va;
+ bzero((void *)rxr->base, rsize);
+	/* Allocate receive soft structs for the ring */
+ if (ixl_allocate_rx_data(que)) {
+ device_printf(dev,
+ "Critical Failure setting up receive structs\n");
+ error = ENOMEM;
+ goto fail;
+ }
+
+ return (0);
+fail:
+ if (rxr->base)
+ i40e_free_dma_mem(&pf->hw, &rxr->dma);
+ if (mtx_initialized(&rxr->mtx))
+ mtx_destroy(&rxr->mtx);
+ if (txr->br) {
+ buf_ring_free(txr->br, M_DEVBUF);
+ txr->br = NULL;
+ }
+ if (txr->base)
+ i40e_free_dma_mem(&pf->hw, &txr->dma);
+ if (mtx_initialized(&txr->mtx))
+ mtx_destroy(&txr->mtx);
+
+ return (error);
+}
+
+/*********************************************************************
+ *
+ *  Allocate memory for the VSI (virtual station interface) and its
+ *  associated queues, rings and the descriptors associated with each;
+ *  called only once at attach.
+ *
+ **********************************************************************/
+int
+ixl_setup_stations(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi;
+ struct ixl_queue *que;
+ int error = 0;
+
+ vsi = &pf->vsi;
+ vsi->back = (void *)pf;
+ vsi->hw = &pf->hw;
+ vsi->id = 0;
+ vsi->num_vlans = 0;
+
+ /* Get memory for the station queues */
+ if (!(vsi->queues =
+ (struct ixl_queue *) malloc(sizeof(struct ixl_queue) *
+ vsi->num_queues, M_DEVBUF, M_NOWAIT | M_ZERO))) {
+ device_printf(dev, "Unable to allocate queue memory\n");
+ error = ENOMEM;
+ return (error);
+ }
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ que = &vsi->queues[i];
+ error = ixl_setup_queue(que, pf, i);
+ if (error)
+ return (error);
+ }
+
+ return (0);
+}
+
+/*
+** Provide an update to the queue RX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_rx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct rx_ring *rxr = &que->rxr;
+ u16 rx_itr;
+ u16 rx_latency = 0;
+ int rx_bytes;
+
+ /* Idle, do nothing */
+ if (rxr->bytes == 0)
+ return;
+
+ if (pf->dynamic_rx_itr) {
+ rx_bytes = rxr->bytes/rxr->itr;
+ rx_itr = rxr->itr;
+
+ /* Adjust latency range */
+ switch (rxr->latency) {
+ case IXL_LOW_LATENCY:
+ if (rx_bytes > 10) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (rx_bytes > 20) {
+ rx_latency = IXL_BULK_LATENCY;
+ rx_itr = IXL_ITR_8K;
+ } else if (rx_bytes <= 10) {
+ rx_latency = IXL_LOW_LATENCY;
+ rx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (rx_bytes <= 20) {
+ rx_latency = IXL_AVE_LATENCY;
+ rx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ rxr->latency = rx_latency;
+
+ if (rx_itr != rxr->itr) {
+ /* do an exponential smoothing */
+ rx_itr = (10 * rx_itr * rxr->itr) /
+ ((9 * rx_itr) + rxr->itr);
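+			/*
+			 * Worked example (illustrative): with an old ITR of
+			 * 100 and a target of 20, (10*20*100) / (9*20 + 100)
+			 * = 20000/280 ~= 71, so the value steps toward the
+			 * target rather than jumping to it.
+			 */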
+ rxr->itr = rx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->rx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->rx_itr_setting = pf->rx_itr;
+ /* Update the hardware if needed */
+ if (rxr->itr != vsi->rx_itr_setting) {
+ rxr->itr = vsi->rx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_RX_ITR,
+ que->me), rxr->itr);
+ }
+ }
+ rxr->bytes = 0;
+ rxr->packets = 0;
+ return;
+}
+
+
+/*
+** Provide an update to the queue TX
+** interrupt moderation value.
+*/
+void
+ixl_set_queue_tx_itr(struct ixl_queue *que)
+{
+ struct ixl_vsi *vsi = que->vsi;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct tx_ring *txr = &que->txr;
+ u16 tx_itr;
+ u16 tx_latency = 0;
+ int tx_bytes;
+
+ /* Idle, do nothing */
+ if (txr->bytes == 0)
+ return;
+
+ if (pf->dynamic_tx_itr) {
+ tx_bytes = txr->bytes/txr->itr;
+ tx_itr = txr->itr;
+
+ switch (txr->latency) {
+ case IXL_LOW_LATENCY:
+ if (tx_bytes > 10) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ case IXL_AVE_LATENCY:
+ if (tx_bytes > 20) {
+ tx_latency = IXL_BULK_LATENCY;
+ tx_itr = IXL_ITR_8K;
+ } else if (tx_bytes <= 10) {
+ tx_latency = IXL_LOW_LATENCY;
+ tx_itr = IXL_ITR_100K;
+ }
+ break;
+ case IXL_BULK_LATENCY:
+ if (tx_bytes <= 20) {
+ tx_latency = IXL_AVE_LATENCY;
+ tx_itr = IXL_ITR_20K;
+ }
+ break;
+ }
+
+ txr->latency = tx_latency;
+
+ if (tx_itr != txr->itr) {
+ /* do an exponential smoothing */
+ tx_itr = (10 * tx_itr * txr->itr) /
+ ((9 * tx_itr) + txr->itr);
+ txr->itr = tx_itr & IXL_MAX_ITR;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+
+	} else { /* We may have toggled to non-dynamic */
+ if (vsi->tx_itr_setting & IXL_ITR_DYNAMIC)
+ vsi->tx_itr_setting = pf->tx_itr;
+ /* Update the hardware if needed */
+ if (txr->itr != vsi->tx_itr_setting) {
+ txr->itr = vsi->tx_itr_setting;
+ wr32(hw, I40E_PFINT_ITRN(IXL_TX_ITR,
+ que->me), txr->itr);
+ }
+ }
+ txr->bytes = 0;
+ txr->packets = 0;
+ return;
+}
+
+void
+ixl_add_vsi_sysctls(struct ixl_pf *pf, struct ixl_vsi *vsi,
+ struct sysctl_ctx_list *ctx, const char *sysctl_name)
+{
+ struct sysctl_oid *tree;
+ struct sysctl_oid_list *child;
+ struct sysctl_oid_list *vsi_list;
+
+ tree = device_get_sysctl_tree(pf->dev);
+ child = SYSCTL_CHILDREN(tree);
+ vsi->vsi_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, sysctl_name,
+ CTLFLAG_RD, NULL, "VSI Number");
+ vsi_list = SYSCTL_CHILDREN(vsi->vsi_node);
+
+ ixl_add_sysctls_eth_stats(ctx, vsi_list, &vsi->eth_stats);
+}
+
+#ifdef IXL_DEBUG
+/**
+ * ixl_sysctl_qtx_tail_handler
+ * Retrieves I40E_QTX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qtx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+	if (!que)
+		return (0);
+
+	val = rd32(que->vsi->hw, que->txr.tail);
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return (error);
+ return (0);
+}
+
+/**
+ * ixl_sysctl_qrx_tail_handler
+ * Retrieves I40E_QRX_TAIL value from hardware
+ * for a sysctl.
+ */
+int
+ixl_sysctl_qrx_tail_handler(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_queue *que;
+ int error;
+ u32 val;
+
+ que = ((struct ixl_queue *)oidp->oid_arg1);
+	if (!que)
+		return (0);
+
+	val = rd32(que->vsi->hw, que->rxr.tail);
+	error = sysctl_handle_int(oidp, &val, 0, req);
+	if (error || !req->newptr)
+		return (error);
+ return (0);
+}
+#endif
+
+/*
+ * Used to set the Tx ITR value for all of the PF LAN VSI's queues.
+ * Writes to the ITR registers immediately.
+ */
+static int
+ixl_sysctl_pf_tx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int error = 0;
+ int requested_tx_itr;
+
+ requested_tx_itr = pf->tx_itr;
+ error = sysctl_handle_int(oidp, &requested_tx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (pf->dynamic_tx_itr) {
+ device_printf(dev,
+ "Cannot set TX itr value while dynamic TX itr is enabled\n");
+ return (EINVAL);
+ }
+ if (requested_tx_itr < 0 || requested_tx_itr > IXL_MAX_ITR) {
+ device_printf(dev,
+ "Invalid TX itr value; value must be between 0 and %d\n",
+ IXL_MAX_ITR);
+ return (EINVAL);
+ }
+
+ pf->tx_itr = requested_tx_itr;
+ ixl_configure_tx_itr(pf);
+
+ return (error);
+}
+
+/*
+ * Used to set the Rx ITR value for all of the PF LAN VSI's queues.
+ * Writes to the ITR registers immediately.
+ */
+static int
+ixl_sysctl_pf_rx_itr(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ device_t dev = pf->dev;
+ int error = 0;
+ int requested_rx_itr;
+
+ requested_rx_itr = pf->rx_itr;
+ error = sysctl_handle_int(oidp, &requested_rx_itr, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (pf->dynamic_rx_itr) {
+ device_printf(dev,
+ "Cannot set RX itr value while dynamic RX itr is enabled\n");
+ return (EINVAL);
+ }
+ if (requested_rx_itr < 0 || requested_rx_itr > IXL_MAX_ITR) {
+ device_printf(dev,
+ "Invalid RX itr value; value must be between 0 and %d\n",
+ IXL_MAX_ITR);
+ return (EINVAL);
+ }
+
+ pf->rx_itr = requested_rx_itr;
+ ixl_configure_rx_itr(pf);
+
+ return (error);
+}
+
+void
+ixl_add_hw_stats(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_queue *queues = vsi->queues;
+ struct i40e_hw_port_stats *pf_stats = &pf->stats;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid *tree = device_get_sysctl_tree(dev);
+ struct sysctl_oid_list *child = SYSCTL_CHILDREN(tree);
+ struct sysctl_oid_list *vsi_list;
+
+ struct sysctl_oid *queue_node;
+ struct sysctl_oid_list *queue_list;
+
+ struct tx_ring *txr;
+ struct rx_ring *rxr;
+ char queue_namebuf[QUEUE_NAME_LEN];
+
+ /* Driver statistics */
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "watchdog_events",
+ CTLFLAG_RD, &pf->watchdog_events,
+ "Watchdog timeouts");
+ SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "admin_irq",
+ CTLFLAG_RD, &pf->admin_irq,
+ "Admin Queue IRQ Handled");
+
+ ixl_add_vsi_sysctls(pf, &pf->vsi, ctx, "pf");
+ vsi_list = SYSCTL_CHILDREN(pf->vsi.vsi_node);
+
+ /* Queue statistics */
+ for (int q = 0; q < vsi->num_queues; q++) {
+ snprintf(queue_namebuf, QUEUE_NAME_LEN, "que%d", q);
+ queue_node = SYSCTL_ADD_NODE(ctx, vsi_list,
+ OID_AUTO, queue_namebuf, CTLFLAG_RD, NULL, "Queue #");
+ queue_list = SYSCTL_CHILDREN(queue_node);
+
+ txr = &(queues[q].txr);
+ rxr = &(queues[q].rxr);
+
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "mbuf_defrag_failed",
+ CTLFLAG_RD, &(queues[q].mbuf_defrag_failed),
+ "m_defrag() failed");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "irqs",
+ CTLFLAG_RD, &(queues[q].irqs),
+ "irqs on this queue");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tso_tx",
+ CTLFLAG_RD, &(queues[q].tso),
+ "TSO");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_dmamap_failed",
+ CTLFLAG_RD, &(queues[q].tx_dmamap_failed),
+ "Driver tx dma failure in xmit");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "no_desc_avail",
+ CTLFLAG_RD, &(txr->no_desc),
+ "Queue No Descriptor Available");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_packets",
+ CTLFLAG_RD, &(txr->total_packets),
+ "Queue Packets Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "tx_bytes",
+ CTLFLAG_RD, &(txr->tx_bytes),
+ "Queue Bytes Transmitted");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_packets",
+ CTLFLAG_RD, &(rxr->rx_packets),
+ "Queue Packets Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_bytes",
+ CTLFLAG_RD, &(rxr->rx_bytes),
+ "Queue Bytes Received");
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_desc_err",
+ CTLFLAG_RD, &(rxr->desc_errs),
+ "Queue Rx Descriptor Errors");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_itr",
+ CTLFLAG_RD, &(rxr->itr), 0,
+ "Queue Rx ITR Interval");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "tx_itr",
+ CTLFLAG_RD, &(txr->itr), 0,
+ "Queue Tx ITR Interval");
+#ifdef IXL_DEBUG
+ SYSCTL_ADD_UQUAD(ctx, queue_list, OID_AUTO, "rx_not_done",
+ CTLFLAG_RD, &(rxr->not_done),
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_refresh",
+ CTLFLAG_RD, &(rxr->next_refresh), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_UINT(ctx, queue_list, OID_AUTO, "rx_next_check",
+ CTLFLAG_RD, &(rxr->next_check), 0,
+ "Queue Rx Descriptors not Done");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qtx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qtx_tail_handler, "IU",
+ "Queue Transmit Descriptor Tail");
+ SYSCTL_ADD_PROC(ctx, queue_list, OID_AUTO, "qrx_tail",
+ CTLTYPE_UINT | CTLFLAG_RD, &queues[q],
+ sizeof(struct ixl_queue),
+ ixl_sysctl_qrx_tail_handler, "IU",
+ "Queue Receive Descriptor Tail");
+#endif
+ }
+
+ /* MAC stats */
+ ixl_add_sysctls_mac_stats(ctx, child, pf_stats);
+}
+
+void
+ixl_add_sysctls_eth_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_eth_stats *eth_stats)
+{
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&eth_stats->rx_bytes, "good_octets_rcvd", "Good Octets Received"},
+ {&eth_stats->rx_unicast, "ucast_pkts_rcvd",
+ "Unicast Packets Received"},
+ {&eth_stats->rx_multicast, "mcast_pkts_rcvd",
+ "Multicast Packets Received"},
+ {&eth_stats->rx_broadcast, "bcast_pkts_rcvd",
+ "Broadcast Packets Received"},
+ {&eth_stats->rx_discards, "rx_discards", "Discarded RX packets"},
+ {&eth_stats->tx_bytes, "good_octets_txd", "Good Octets Transmitted"},
+ {&eth_stats->tx_unicast, "ucast_pkts_txd", "Unicast Packets Transmitted"},
+ {&eth_stats->tx_multicast, "mcast_pkts_txd",
+ "Multicast Packets Transmitted"},
+ {&eth_stats->tx_broadcast, "bcast_pkts_txd",
+ "Broadcast Packets Transmitted"},
+ // end
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+	while (entry->stat != 0) {
+ SYSCTL_ADD_UQUAD(ctx, child, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+void
+ixl_add_sysctls_mac_stats(struct sysctl_ctx_list *ctx,
+ struct sysctl_oid_list *child,
+ struct i40e_hw_port_stats *stats)
+{
+ struct sysctl_oid *stat_node = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "mac",
+ CTLFLAG_RD, NULL, "Mac Statistics");
+ struct sysctl_oid_list *stat_list = SYSCTL_CHILDREN(stat_node);
+
+ struct i40e_eth_stats *eth_stats = &stats->eth;
+ ixl_add_sysctls_eth_stats(ctx, stat_list, eth_stats);
+
+ struct ixl_sysctl_info ctls[] =
+ {
+ {&stats->crc_errors, "crc_errors", "CRC Errors"},
+ {&stats->illegal_bytes, "illegal_bytes", "Illegal Byte Errors"},
+ {&stats->mac_local_faults, "local_faults", "MAC Local Faults"},
+ {&stats->mac_remote_faults, "remote_faults", "MAC Remote Faults"},
+ {&stats->rx_length_errors, "rx_length_errors", "Receive Length Errors"},
+ /* Packet Reception Stats */
+ {&stats->rx_size_64, "rx_frames_64", "64 byte frames received"},
+ {&stats->rx_size_127, "rx_frames_65_127", "65-127 byte frames received"},
+ {&stats->rx_size_255, "rx_frames_128_255", "128-255 byte frames received"},
+ {&stats->rx_size_511, "rx_frames_256_511", "256-511 byte frames received"},
+ {&stats->rx_size_1023, "rx_frames_512_1023", "512-1023 byte frames received"},
+ {&stats->rx_size_1522, "rx_frames_1024_1522", "1024-1522 byte frames received"},
+ {&stats->rx_size_big, "rx_frames_big", "1523-9522 byte frames received"},
+ {&stats->rx_undersize, "rx_undersize", "Undersized packets received"},
+ {&stats->rx_fragments, "rx_fragmented", "Fragmented packets received"},
+ {&stats->rx_oversize, "rx_oversized", "Oversized packets received"},
+ {&stats->rx_jabber, "rx_jabber", "Received Jabber"},
+ {&stats->checksum_error, "checksum_errors", "Checksum Errors"},
+ /* Packet Transmission Stats */
+ {&stats->tx_size_64, "tx_frames_64", "64 byte frames transmitted"},
+ {&stats->tx_size_127, "tx_frames_65_127", "65-127 byte frames transmitted"},
+ {&stats->tx_size_255, "tx_frames_128_255", "128-255 byte frames transmitted"},
+ {&stats->tx_size_511, "tx_frames_256_511", "256-511 byte frames transmitted"},
+ {&stats->tx_size_1023, "tx_frames_512_1023", "512-1023 byte frames transmitted"},
+ {&stats->tx_size_1522, "tx_frames_1024_1522", "1024-1522 byte frames transmitted"},
+ {&stats->tx_size_big, "tx_frames_big", "1523-9522 byte frames transmitted"},
+ /* Flow control */
+ {&stats->link_xon_tx, "xon_txd", "Link XON transmitted"},
+ {&stats->link_xon_rx, "xon_recvd", "Link XON received"},
+ {&stats->link_xoff_tx, "xoff_txd", "Link XOFF transmitted"},
+ {&stats->link_xoff_rx, "xoff_recvd", "Link XOFF received"},
+ /* End */
+ {0,0,0}
+ };
+
+ struct ixl_sysctl_info *entry = ctls;
+	while (entry->stat != 0) {
+ SYSCTL_ADD_UQUAD(ctx, stat_list, OID_AUTO, entry->name,
+ CTLFLAG_RD, entry->stat,
+ entry->description);
+ entry++;
+ }
+}
+
+void
+ixl_set_rss_key(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+#ifdef RSS
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG];
+#else
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
+ 0x0, 0x0, 0x0};
+#endif
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#endif
+ /* Fill out hash function seed */
+ if (hw->mac.type == I40E_MAC_X722) {
+ struct i40e_aqc_get_set_rss_key_data key_data;
+ bcopy(rss_seed, key_data.standard_rss_key, 40);
+ status = i40e_aq_set_rss_key(hw, vsi->vsi_num, &key_data);
+ if (status)
+ device_printf(dev, "i40e_aq_set_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++)
+ i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), rss_seed[i]);
+ }
+}
+
+/*
+ * Configure enabled PCTYPES for RSS.
+ */
+void
+ixl_set_rss_pctypes(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ u64 set_hena = 0, hena;
+
+#ifdef RSS
+ u32 rss_hash_config;
+
+ rss_hash_config = rss_gethashconfig();
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV4)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV4_UDP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_OTHER);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_IPV6_EX)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_FRAG_IPV6);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_TCP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_TCP);
+ if (rss_hash_config & RSS_HASHTYPE_RSS_UDP_IPV6)
+ set_hena |= ((u64)1 << I40E_FILTER_PCTYPE_NONF_IPV6_UDP);
+#else
+ set_hena = IXL_DEFAULT_RSS_HENA;
+#endif
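+	/*
+	 * HENA is a single 64-bit enable mask split across two 32-bit
+	 * registers; read both halves, OR in the requested PCTYPE bits,
+	 * and write the result back.
+	 */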
+ hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
+ ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
+ hena |= set_hena;
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
+ i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
+}
+
+void
+ixl_set_rss_hlut(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct ixl_vsi *vsi = &pf->vsi;
+ int i, que_id;
+ int lut_entry_width;
+ u32 lut = 0;
+ enum i40e_status_code status;
+
+ if (hw->mac.type == I40E_MAC_X722)
+ lut_entry_width = 7;
+ else
+ lut_entry_width = pf->hw.func_caps.rss_table_entry_width;
+
+	/* Populate the LUT with the max number of queues, in round-robin fashion */
+ u8 hlut_buf[512];
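+	/*
+	 * Illustration: in the non-RSS case with 4 queues, the table
+	 * entries cycle 0, 1, 2, 3, 0, 1, ... so flows are spread evenly
+	 * across the configured queues.
+	 */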
+ for (i = 0; i < pf->hw.func_caps.rss_table_size; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % vsi->num_queues;
+#else
+ que_id = i % vsi->num_queues;
+#endif
+ lut = (que_id & ((0x1 << lut_entry_width) - 1));
+ hlut_buf[i] = lut;
+ }
+
+ if (hw->mac.type == I40E_MAC_X722) {
+ status = i40e_aq_set_rss_lut(hw, vsi->vsi_num, TRUE, hlut_buf, sizeof(hlut_buf));
+ if (status)
+ device_printf(dev, "i40e_aq_set_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ } else {
+ for (i = 0; i < pf->hw.func_caps.rss_table_size >> 2; i++)
+ wr32(hw, I40E_PFQF_HLUT(i), ((u32 *)hlut_buf)[i]);
+ ixl_flush(hw);
+ }
+}
+
+/*
+** Setup the PF's RSS parameters.
+*/
+void
+ixl_config_rss(struct ixl_pf *pf)
+{
+ ixl_set_rss_key(pf);
+ ixl_set_rss_pctypes(pf);
+ ixl_set_rss_hlut(pf);
+}
+
+/*
+** This routine is run via a vlan config EVENT;
+** it enables us to use the HW Filter table since
+** we can get the vlan id. This just creates the
+** entry in the soft version of the VFTA; init will
+** repopulate the real table.
+*/
+void
+ixl_register_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg) /* Not our event */
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ ++vsi->num_vlans;
+ ixl_add_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine is run via a vlan
+** unconfig EVENT; it removes our entry
+** from the soft VFTA.
+*/
+void
+ixl_unregister_vlan(void *arg, struct ifnet *ifp, u16 vtag)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ if (ifp->if_softc != arg)
+ return;
+
+ if ((vtag == 0) || (vtag > 4095)) /* Invalid */
+ return;
+
+ IXL_PF_LOCK(pf);
+ --vsi->num_vlans;
+ ixl_del_filter(vsi, hw->mac.addr, vtag);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+** This routine updates vlan filters; called by init,
+** it scans the filter table and then updates the hw
+** after a soft reset.
+*/
+void
+ixl_setup_vlan_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_mac_filter *f;
+ int cnt = 0, flags;
+
+ if (vsi->num_vlans == 0)
+ return;
+ /*
+ ** Scan the filter list for vlan entries,
+ ** mark them for addition and then call
+ ** for the AQ update.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags & IXL_FILTER_VLAN) {
+ f->flags |=
+ (IXL_FILTER_ADD |
+ IXL_FILTER_USED);
+ cnt++;
+ }
+ }
+ if (cnt == 0) {
+ printf("setup vlan: no filters found!\n");
+ return;
+ }
+ flags = IXL_FILTER_VLAN;
+ flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ ixl_add_hw_filters(vsi, flags, cnt);
+ return;
+}
+
+/*
+** Initialize filter list and add filters that the hardware
+** needs to know about.
+**
+** Requires VSI's filter list & seid to be set before calling.
+*/
+void
+ixl_init_filters(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+
+ /* Add broadcast address */
+ ixl_add_filter(vsi, ixl_bcast_addr, IXL_VLAN_ANY);
+
+ /*
+ * Prevent Tx flow control frames from being sent out by
+ * non-firmware transmitters.
+ * This affects every VSI in the PF.
+ */
+ if (pf->enable_tx_fc_filter)
+ i40e_add_filter_to_drop_tx_flow_control_frames(vsi->hw, vsi->seid);
+}
+
+/*
+** This routine adds multicast filters
+*/
+void
+ixl_add_mc_filter(struct ixl_vsi *vsi, u8 *macaddr)
+{
+ struct ixl_mac_filter *f;
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (f != NULL)
+ return;
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ printf("WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = IXL_VLAN_ANY;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED
+ | IXL_FILTER_MC);
+
+ return;
+}
+
+void
+ixl_reconfigure_filters(struct ixl_vsi *vsi)
+{
+ ixl_add_hw_filters(vsi, IXL_FILTER_USED, vsi->num_macs);
+}
+
+/*
+** This routine adds macvlan filters
+*/
+void
+ixl_add_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f, *tmp;
+ struct ixl_pf *pf;
+ device_t dev;
+
+ DEBUGOUT("ixl_add_filter: begin");
+
+ pf = vsi->back;
+ dev = pf->dev;
+
+ /* Does one already exist */
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f != NULL)
+ return;
+ /*
+	** Is this the first vlan being registered? If so, we
+	** need to remove the ANY filter that indicates we are
+	** not in a vlan and replace it with a vlan-0 filter.
+ */
+ if ((vlan != IXL_VLAN_ANY) && (vsi->num_vlans == 1)) {
+ tmp = ixl_find_filter(vsi, macaddr, IXL_VLAN_ANY);
+ if (tmp != NULL) {
+ ixl_del_filter(vsi, macaddr, IXL_VLAN_ANY);
+ ixl_add_filter(vsi, macaddr, 0);
+ }
+ }
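+	/*
+	 * E.g., when vlan 100 is the first vlan registered for a MAC, the
+	 * (MAC, ANY) filter above is swapped for (MAC, 0) so untagged
+	 * traffic still matches, and the (MAC, 100) filter is added below.
+	 */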
+
+ f = ixl_get_filter(vsi);
+ if (f == NULL) {
+ device_printf(dev, "WARNING: no filter available!!\n");
+ return;
+ }
+ bcopy(macaddr, f->macaddr, ETHER_ADDR_LEN);
+ f->vlan = vlan;
+ f->flags |= (IXL_FILTER_ADD | IXL_FILTER_USED);
+ if (f->vlan != IXL_VLAN_ANY)
+ f->flags |= IXL_FILTER_VLAN;
+ else
+ vsi->num_macs++;
+
+ ixl_add_hw_filters(vsi, f->flags, 1);
+ return;
+}
+
+void
+ixl_del_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+
+ f = ixl_find_filter(vsi, macaddr, vlan);
+ if (f == NULL)
+ return;
+
+ f->flags |= IXL_FILTER_DEL;
+ ixl_del_hw_filters(vsi, 1);
+ vsi->num_macs--;
+
+ /* Check if this is the last vlan removal */
+ if (vlan != IXL_VLAN_ANY && vsi->num_vlans == 0) {
+ /* Switch back to a non-vlan filter */
+ ixl_del_filter(vsi, macaddr, 0);
+ ixl_add_filter(vsi, macaddr, IXL_VLAN_ANY);
+ }
+ return;
+}
+
+/*
+** Find the filter with both matching mac addr and vlan id
+*/
+struct ixl_mac_filter *
+ixl_find_filter(struct ixl_vsi *vsi, u8 *macaddr, s16 vlan)
+{
+ struct ixl_mac_filter *f;
+ bool match = FALSE;
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (!cmp_etheraddr(f->macaddr, macaddr))
+ continue;
+ if (f->vlan == vlan) {
+ match = TRUE;
+ break;
+ }
+ }
+
+ if (!match)
+ f = NULL;
+ return (f);
+}
+
+/*
+** This routine takes additions to the vsi filter
+** table and creates an Admin Queue call to create
+** the filters in the hardware.
+*/
+void
+ixl_add_hw_filters(struct ixl_vsi *vsi, int flags, int cnt)
+{
+ struct i40e_aqc_add_macvlan_element_data *a, *b;
+ struct ixl_mac_filter *f;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
+ int err, j = 0;
+
+ pf = vsi->back;
+ dev = pf->dev;
+ hw = &pf->hw;
+ IXL_PF_LOCK_ASSERT(pf);
+
+ a = malloc(sizeof(struct i40e_aqc_add_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (a == NULL) {
+ device_printf(dev, "add_hw_filters failed to get memory\n");
+ return;
+ }
+
+ /*
+	** Scan the filter list; each time we find a matching
+	** entry we add it to the admin queue array and turn off
+	** the add bit.
+ */
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ if (f->flags == flags) {
+ b = &a[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, b->mac_addr, ETHER_ADDR_LEN);
+ if (f->vlan == IXL_VLAN_ANY) {
+ b->vlan_tag = 0;
+ b->flags = I40E_AQC_MACVLAN_ADD_IGNORE_VLAN;
+ } else {
+ b->vlan_tag = f->vlan;
+ b->flags = 0;
+ }
+ b->flags |= I40E_AQC_MACVLAN_ADD_PERFECT_MATCH;
+ f->flags &= ~IXL_FILTER_ADD;
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_add_macvlan(hw, vsi->seid, a, j, NULL);
+ if (err)
+ device_printf(dev, "aq_add_macvlan err %d, "
+ "aq_error %d\n", err, hw->aq.asq_last_status);
+ else
+ vsi->hw_filters_add += j;
+ }
+ free(a, M_DEVBUF);
+ return;
+}
+
+/*
+** This routine takes removals in the vsi filter
+** table and creates an Admin Queue call to delete
+** the filters in the hardware.
+*/
+void
+ixl_del_hw_filters(struct ixl_vsi *vsi, int cnt)
+{
+ struct i40e_aqc_remove_macvlan_element_data *d, *e;
+ struct ixl_pf *pf;
+ struct i40e_hw *hw;
+ device_t dev;
+ struct ixl_mac_filter *f, *f_temp;
+ int err, j = 0;
+
+ DEBUGOUT("ixl_del_hw_filters: begin\n");
+
+ pf = vsi->back;
+ hw = &pf->hw;
+ dev = pf->dev;
+
+ d = malloc(sizeof(struct i40e_aqc_remove_macvlan_element_data) * cnt,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (d == NULL) {
+ printf("del hw filter failed to get memory\n");
+ return;
+ }
+
+ SLIST_FOREACH_SAFE(f, &vsi->ftl, next, f_temp) {
+ if (f->flags & IXL_FILTER_DEL) {
+ e = &d[j]; // a pox on fvl long names :)
+ bcopy(f->macaddr, e->mac_addr, ETHER_ADDR_LEN);
+ e->vlan_tag = (f->vlan == IXL_VLAN_ANY ? 0 : f->vlan);
+ e->flags = I40E_AQC_MACVLAN_DEL_PERFECT_MATCH;
+ /* delete entry from vsi list */
+ SLIST_REMOVE(&vsi->ftl, f, ixl_mac_filter, next);
+ free(f, M_DEVBUF);
+ j++;
+ }
+ if (j == cnt)
+ break;
+ }
+ if (j > 0) {
+ err = i40e_aq_remove_macvlan(hw, vsi->seid, d, j, NULL);
+ if (err && hw->aq.asq_last_status != I40E_AQ_RC_ENOENT) {
+ int sc = 0;
+ for (int i = 0; i < j; i++)
+ sc += (!d[i].error_code);
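+			/*
+			 * sc counts the elements that were removed
+			 * successfully (error_code == 0); j - sc failed.
+			 */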
+ vsi->hw_filters_del += sc;
+ device_printf(dev,
+ "Failed to remove %d/%d filters, aq error %d\n",
+ j - sc, j, hw->aq.asq_last_status);
+ } else
+ vsi->hw_filters_del += j;
+ }
+ free(d, M_DEVBUF);
+
+ DEBUGOUT("ixl_del_hw_filters: end\n");
+ return;
+}
+
+int
+ixl_enable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Enabling PF TX ring %4d / VSI TX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
+ i40e_pre_tx_queue_cfg(hw, pf_qidx, TRUE);
+
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ reg |= I40E_QTX_ENA_QENA_REQ_MASK |
+ I40E_QTX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
+ /* Verify the enable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QTX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "TX queue %d still disabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_enable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ ixl_dbg(pf, IXL_DBG_EN_DIS,
+ "Enabling PF RX ring %4d / VSI RX ring %4d...\n",
+ pf_qidx, vsi_qidx);
+
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ reg |= I40E_QRX_ENA_QENA_REQ_MASK |
+ I40E_QRX_ENA_QENA_STAT_MASK;
+ wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
+ /* Verify the enable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK)
+ break;
+ i40e_msec_delay(10);
+ }
+ if ((reg & I40E_QRX_ENA_QENA_STAT_MASK) == 0) {
+ device_printf(pf->dev, "RX queue %d still disabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_enable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ int error = 0;
+
+ error = ixl_enable_tx_ring(pf, qtag, vsi_qidx);
+ /* Called function already prints error message */
+ if (error)
+ return (error);
+ error = ixl_enable_rx_ring(pf, qtag, vsi_qidx);
+ return (error);
+}
+
+/* For PF VSI only */
+int
+ixl_enable_rings(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ error = ixl_enable_ring(pf, &pf->qtag, i);
+ if (error)
+ return (error);
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_tx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ i40e_pre_tx_queue_cfg(hw, pf_qidx, FALSE);
+ i40e_usec_delay(500);
+
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ reg &= ~I40E_QTX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QTX_ENA(pf_qidx), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QTX_ENA(pf_qidx));
+ if (!(reg & I40E_QTX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QTX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "TX queue %d still enabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_rx_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0;
+ u32 reg;
+ u16 pf_qidx;
+
+ pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
+ wr32(hw, I40E_QRX_ENA(pf_qidx), reg);
+ /* Verify the disable took */
+ for (int j = 0; j < 10; j++) {
+ reg = rd32(hw, I40E_QRX_ENA(pf_qidx));
+ if (!(reg & I40E_QRX_ENA_QENA_STAT_MASK))
+ break;
+ i40e_msec_delay(10);
+ }
+ if (reg & I40E_QRX_ENA_QENA_STAT_MASK) {
+ device_printf(pf->dev, "RX queue %d still enabled!\n",
+ pf_qidx);
+ error = ETIMEDOUT;
+ }
+
+ return (error);
+}
+
+int
+ixl_disable_ring(struct ixl_pf *pf, struct ixl_pf_qtag *qtag, u16 vsi_qidx)
+{
+ int error = 0;
+
+ error = ixl_disable_tx_ring(pf, qtag, vsi_qidx);
+ /* Called function already prints error message */
+ if (error)
+ return (error);
+ error = ixl_disable_rx_ring(pf, qtag, vsi_qidx);
+ return (error);
+}
+
+/* For PF VSI only */
+int
+ixl_disable_rings(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = vsi->back;
+ int error = 0;
+
+ for (int i = 0; i < vsi->num_queues; i++) {
+ error = ixl_disable_ring(pf, &pf->qtag, i);
+ if (error)
+ return (error);
+ }
+
+ return (error);
+}
+
+/**
+ * ixl_handle_mdd_event
+ *
+ * Called from the interrupt handler to identify possibly malicious VFs
+ * (but it also detects events from the PF)
+ **/
+void
+ixl_handle_mdd_event(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ bool mdd_detected = false;
+ bool pf_mdd_detected = false;
+ u32 reg;
+
+ /* find what triggered the MDD event */
+ reg = rd32(hw, I40E_GL_MDET_TX);
+ if (reg & I40E_GL_MDET_TX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_TX_PF_NUM_MASK) >>
+ I40E_GL_MDET_TX_PF_NUM_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
+ I40E_GL_MDET_TX_EVENT_SHIFT;
+ u16 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
+ I40E_GL_MDET_TX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event %d"
+ " on TX queue %d, pf number %d\n",
+ event, queue, pf_num);
+ wr32(hw, I40E_GL_MDET_TX, 0xffffffff);
+ mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_GL_MDET_RX);
+ if (reg & I40E_GL_MDET_RX_VALID_MASK) {
+ u8 pf_num = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
+ I40E_GL_MDET_RX_FUNCTION_SHIFT;
+ u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
+ I40E_GL_MDET_RX_EVENT_SHIFT;
+ u16 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
+ I40E_GL_MDET_RX_QUEUE_SHIFT;
+ device_printf(dev,
+ "Malicious Driver Detection event %d"
+ " on RX queue %d, pf number %d\n",
+ event, queue, pf_num);
+ wr32(hw, I40E_GL_MDET_RX, 0xffffffff);
+ mdd_detected = true;
+ }
+
+ if (mdd_detected) {
+ reg = rd32(hw, I40E_PF_MDET_TX);
+ if (reg & I40E_PF_MDET_TX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_TX, 0xFFFF);
+ device_printf(dev,
+				"MDD TX event is for this function!\n");
+ pf_mdd_detected = true;
+ }
+ reg = rd32(hw, I40E_PF_MDET_RX);
+ if (reg & I40E_PF_MDET_RX_VALID_MASK) {
+ wr32(hw, I40E_PF_MDET_RX, 0xFFFF);
+ device_printf(dev,
+				"MDD RX event is for this function!\n");
+ pf_mdd_detected = true;
+ }
+ }
+
+ /* re-enable mdd interrupt cause */
+ reg = rd32(hw, I40E_PFINT_ICR0_ENA);
+ reg |= I40E_PFINT_ICR0_ENA_MAL_DETECT_MASK;
+ wr32(hw, I40E_PFINT_ICR0_ENA, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_enable_intr(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ if (pf->enable_msix) {
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_enable_queue(hw, que->me);
+ } else
+ ixl_enable_legacy(hw);
+}
+
+void
+ixl_disable_rings_intr(struct ixl_vsi *vsi)
+{
+ struct i40e_hw *hw = vsi->hw;
+ struct ixl_queue *que = vsi->queues;
+
+ for (int i = 0; i < vsi->num_queues; i++, que++)
+ ixl_disable_queue(hw, que->me);
+}
+
+void
+ixl_disable_intr(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = vsi->hw;
+
+ if (pf->enable_msix)
+ ixl_disable_adminq(hw);
+ else
+ ixl_disable_legacy(hw);
+}
+
+void
+ixl_enable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_disable_adminq(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+ ixl_flush(hw);
+}
+
+void
+ixl_enable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = I40E_PFINT_DYN_CTLN_INTENA_MASK |
+ I40E_PFINT_DYN_CTLN_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+void
+ixl_disable_queue(struct i40e_hw *hw, int id)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTLN_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTLN(id), reg);
+}
+
+void
+ixl_enable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+ reg = I40E_PFINT_DYN_CTL0_INTENA_MASK |
+ I40E_PFINT_DYN_CTL0_CLEARPBA_MASK |
+ (IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT);
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+void
+ixl_disable_legacy(struct i40e_hw *hw)
+{
+ u32 reg;
+
+ reg = IXL_ITR_NONE << I40E_PFINT_DYN_CTL0_ITR_INDX_SHIFT;
+ wr32(hw, I40E_PFINT_DYN_CTL0, reg);
+}
+
+void
+ixl_update_stats_counters(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_vf *vf;
+
+ struct i40e_hw_port_stats *nsd = &pf->stats;
+ struct i40e_hw_port_stats *osd = &pf->stats_offsets;
+
+ /* Update hw stats */
+ ixl_stat_update32(hw, I40E_GLPRT_CRCERRS(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->crc_errors, &nsd->crc_errors);
+ ixl_stat_update32(hw, I40E_GLPRT_ILLERRC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->illegal_bytes, &nsd->illegal_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GORCH(hw->port),
+ I40E_GLPRT_GORCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_bytes, &nsd->eth.rx_bytes);
+ ixl_stat_update48(hw, I40E_GLPRT_GOTCH(hw->port),
+ I40E_GLPRT_GOTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_bytes, &nsd->eth.tx_bytes);
+ ixl_stat_update32(hw, I40E_GLPRT_RDPC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_discards,
+ &nsd->eth.rx_discards);
+ ixl_stat_update48(hw, I40E_GLPRT_UPRCH(hw->port),
+ I40E_GLPRT_UPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_unicast,
+ &nsd->eth.rx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_UPTCH(hw->port),
+ I40E_GLPRT_UPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_unicast,
+ &nsd->eth.tx_unicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPRCH(hw->port),
+ I40E_GLPRT_MPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_multicast,
+ &nsd->eth.rx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_MPTCH(hw->port),
+ I40E_GLPRT_MPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_multicast,
+ &nsd->eth.tx_multicast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPRCH(hw->port),
+ I40E_GLPRT_BPRCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.rx_broadcast,
+ &nsd->eth.rx_broadcast);
+ ixl_stat_update48(hw, I40E_GLPRT_BPTCH(hw->port),
+ I40E_GLPRT_BPTCL(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->eth.tx_broadcast,
+ &nsd->eth.tx_broadcast);
+
+ ixl_stat_update32(hw, I40E_GLPRT_TDOLD(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_dropped_link_down,
+ &nsd->tx_dropped_link_down);
+ ixl_stat_update32(hw, I40E_GLPRT_MLFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_local_faults,
+ &nsd->mac_local_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_MRFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->mac_remote_faults,
+ &nsd->mac_remote_faults);
+ ixl_stat_update32(hw, I40E_GLPRT_RLEC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_length_errors,
+ &nsd->rx_length_errors);
+
+ /* Flow control (LFC) stats */
+ ixl_stat_update32(hw, I40E_GLPRT_LXONRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_rx, &nsd->link_xon_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXONTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xon_tx, &nsd->link_xon_tx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFRXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_rx, &nsd->link_xoff_rx);
+ ixl_stat_update32(hw, I40E_GLPRT_LXOFFTXC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->link_xoff_tx, &nsd->link_xoff_tx);
+
+ /* Packet size stats rx */
+ ixl_stat_update48(hw, I40E_GLPRT_PRC64H(hw->port),
+ I40E_GLPRT_PRC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_64, &nsd->rx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC127H(hw->port),
+ I40E_GLPRT_PRC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_127, &nsd->rx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC255H(hw->port),
+ I40E_GLPRT_PRC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_255, &nsd->rx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC511H(hw->port),
+ I40E_GLPRT_PRC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_511, &nsd->rx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1023H(hw->port),
+ I40E_GLPRT_PRC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1023, &nsd->rx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC1522H(hw->port),
+ I40E_GLPRT_PRC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_1522, &nsd->rx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PRC9522H(hw->port),
+ I40E_GLPRT_PRC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_size_big, &nsd->rx_size_big);
+
+ /* Packet size stats tx */
+ ixl_stat_update48(hw, I40E_GLPRT_PTC64H(hw->port),
+ I40E_GLPRT_PTC64L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_64, &nsd->tx_size_64);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC127H(hw->port),
+ I40E_GLPRT_PTC127L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_127, &nsd->tx_size_127);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC255H(hw->port),
+ I40E_GLPRT_PTC255L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_255, &nsd->tx_size_255);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC511H(hw->port),
+ I40E_GLPRT_PTC511L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_511, &nsd->tx_size_511);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1023H(hw->port),
+ I40E_GLPRT_PTC1023L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1023, &nsd->tx_size_1023);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC1522H(hw->port),
+ I40E_GLPRT_PTC1522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_1522, &nsd->tx_size_1522);
+ ixl_stat_update48(hw, I40E_GLPRT_PTC9522H(hw->port),
+ I40E_GLPRT_PTC9522L(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->tx_size_big, &nsd->tx_size_big);
+
+ ixl_stat_update32(hw, I40E_GLPRT_RUC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_undersize, &nsd->rx_undersize);
+ ixl_stat_update32(hw, I40E_GLPRT_RFC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_fragments, &nsd->rx_fragments);
+ ixl_stat_update32(hw, I40E_GLPRT_ROC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_oversize, &nsd->rx_oversize);
+ ixl_stat_update32(hw, I40E_GLPRT_RJC(hw->port),
+ pf->stat_offsets_loaded,
+ &osd->rx_jabber, &nsd->rx_jabber);
+ pf->stat_offsets_loaded = true;
+ /* End hw stats */
+
+ /* Update vsi stats */
+ ixl_update_vsi_stats(vsi);
+
+ for (int i = 0; i < pf->num_vfs; i++) {
+ vf = &pf->vfs[i];
+ if (vf->vf_flags & VF_FLAG_ENABLED)
+ ixl_update_eth_stats(&pf->vfs[i].vsi);
+ }
+}
+
+int
+ixl_rebuild_hw_structs_after_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ bool is_up = false;
+ int error = 0;
+
+ is_up = !!(vsi->ifp->if_drv_flags & IFF_DRV_RUNNING);
+
+ /* Teardown */
+ if (is_up)
+ ixl_stop(pf);
+ error = i40e_shutdown_lan_hmc(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown LAN HMC failed with code %d\n", error);
+ ixl_disable_adminq(hw);
+ ixl_teardown_adminq_msix(pf);
+ error = i40e_shutdown_adminq(hw);
+ if (error)
+ device_printf(dev,
+ "Shutdown Admin queue failed with code %d\n", error);
+
+ /* Setup */
+ error = i40e_init_adminq(hw);
+ if (error != 0 && error != I40E_ERR_FIRMWARE_API_VERSION) {
+ device_printf(dev, "Unable to initialize Admin Queue, error %d\n",
+ error);
+ }
+ error = ixl_setup_adminq_msix(pf);
+ if (error) {
+ device_printf(dev, "ixl_setup_adminq_msix error: %d\n",
+ error);
+ }
+ ixl_configure_intr0_msix(pf);
+ ixl_enable_adminq(hw);
+ error = i40e_init_lan_hmc(hw, hw->func_caps.num_tx_qp,
+ hw->func_caps.num_rx_qp, 0, 0);
+ if (error) {
+ device_printf(dev, "init_lan_hmc failed: %d\n", error);
+ }
+ error = i40e_configure_lan_hmc(hw, I40E_HMC_MODEL_DIRECT_ONLY);
+ if (error) {
+ device_printf(dev, "configure_lan_hmc failed: %d\n", error);
+ }
+ if (is_up)
+ ixl_init(pf);
+
+ return (0);
+}
+
+void
+ixl_handle_empr_reset(struct ixl_pf *pf)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int count = 0;
+ u32 reg;
+
+ /* Typically finishes within 3-4 seconds */
+ while (count++ < 100) {
+ reg = rd32(hw, I40E_GLGEN_RSTAT)
+ & I40E_GLGEN_RSTAT_DEVSTATE_MASK;
+ if (reg)
+ i40e_msec_delay(100);
+ else
+ break;
+ }
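+	/* The loop above allows up to ~10 seconds (100 * 100ms). */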
+ ixl_dbg(pf, IXL_DBG_INFO,
+ "EMPR reset wait count: %d\n", count);
+
+ device_printf(dev, "Rebuilding driver state...\n");
+ ixl_rebuild_hw_structs_after_reset(pf);
+ device_printf(dev, "Rebuilding driver state done.\n");
+
+ atomic_clear_int(&pf->state, IXL_PF_STATE_EMPR_RESETTING);
+}
+
+/*
+** Tasklet handler for MSIX Adminq interrupts
+**  - done outside the interrupt context since it might sleep
+*/
+void
+ixl_do_adminq(void *context, int pending)
+{
+ struct ixl_pf *pf = context;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_arq_event_info event;
+ i40e_status ret;
+ device_t dev = pf->dev;
+ u32 loop = 0;
+ u16 opcode, result;
+
+ if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
+ /* Flag cleared at end of this function */
+ ixl_handle_empr_reset(pf);
+ return;
+ }
+
+ /* Admin Queue handling */
+ event.buf_len = IXL_AQ_BUF_SZ;
+ event.msg_buf = malloc(event.buf_len,
+ M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (!event.msg_buf) {
+ device_printf(dev, "%s: Unable to allocate memory for Admin"
+ " Queue event!\n", __func__);
+ return;
+ }
+
+ IXL_PF_LOCK(pf);
+ /* clean and process any events */
+ do {
+ ret = i40e_clean_arq_element(hw, &event, &result);
+ if (ret)
+ break;
+ opcode = LE16_TO_CPU(event.desc.opcode);
+ ixl_dbg(pf, IXL_DBG_AQ,
+ "%s: Admin Queue event: %#06x\n", __func__, opcode);
+ switch (opcode) {
+ case i40e_aqc_opc_get_link_status:
+ ixl_link_event(pf, &event);
+ break;
+ case i40e_aqc_opc_send_msg_to_pf:
+#ifdef PCI_IOV
+ ixl_handle_vf_msg(pf, &event);
+#endif
+ break;
+ case i40e_aqc_opc_event_lan_overflow:
+ default:
+ break;
+ }
+
+ } while (result && (loop++ < IXL_ADM_LIMIT));
+
+ free(event.msg_buf, M_DEVBUF);
+
+ /*
+ * If there are still messages to process, reschedule ourselves.
+ * Otherwise, re-enable our interrupt.
+ */
+ if (result > 0)
+ taskqueue_enqueue(pf->tq, &pf->adminq);
+ else
+ ixl_enable_adminq(hw);
+
+ IXL_PF_UNLOCK(pf);
+}
+
+/**
+ * Update VSI-specific ethernet statistics counters.
+ **/
+void
+ixl_update_eth_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_eth_stats *es;
+ struct i40e_eth_stats *oes;
+ struct i40e_hw_port_stats *nsd;
+ u16 stat_idx = vsi->info.stat_counter_idx;
+
+ es = &vsi->eth_stats;
+ oes = &vsi->eth_stats_offsets;
+ nsd = &pf->stats;
+
+ /* Gather up the stats that the hw collects */
+ ixl_stat_update32(hw, I40E_GLV_TEPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_errors, &es->tx_errors);
+ ixl_stat_update32(hw, I40E_GLV_RDPC(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_discards, &es->rx_discards);
+
+ ixl_stat_update48(hw, I40E_GLV_GORCH(stat_idx),
+ I40E_GLV_GORCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_bytes, &es->rx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPRCH(stat_idx),
+ I40E_GLV_UPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_unicast, &es->rx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPRCH(stat_idx),
+ I40E_GLV_MPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_multicast, &es->rx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPRCH(stat_idx),
+ I40E_GLV_BPRCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->rx_broadcast, &es->rx_broadcast);
+
+ ixl_stat_update48(hw, I40E_GLV_GOTCH(stat_idx),
+ I40E_GLV_GOTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_bytes, &es->tx_bytes);
+ ixl_stat_update48(hw, I40E_GLV_UPTCH(stat_idx),
+ I40E_GLV_UPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_unicast, &es->tx_unicast);
+ ixl_stat_update48(hw, I40E_GLV_MPTCH(stat_idx),
+ I40E_GLV_MPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_multicast, &es->tx_multicast);
+ ixl_stat_update48(hw, I40E_GLV_BPTCH(stat_idx),
+ I40E_GLV_BPTCL(stat_idx),
+ vsi->stat_offsets_loaded,
+ &oes->tx_broadcast, &es->tx_broadcast);
+ vsi->stat_offsets_loaded = true;
+}
+
+void
+ixl_update_vsi_stats(struct ixl_vsi *vsi)
+{
+ struct ixl_pf *pf;
+ struct ifnet *ifp;
+ struct i40e_eth_stats *es;
+ u64 tx_discards;
+
+ struct i40e_hw_port_stats *nsd;
+
+ pf = vsi->back;
+ ifp = vsi->ifp;
+ es = &vsi->eth_stats;
+ nsd = &pf->stats;
+
+ ixl_update_eth_stats(vsi);
+
+ tx_discards = es->tx_discards + nsd->tx_dropped_link_down;
+ for (int i = 0; i < vsi->num_queues; i++)
+ tx_discards += vsi->queues[i].txr.br->br_drops;
+
+ /* Update ifnet stats */
+ IXL_SET_IPACKETS(vsi, es->rx_unicast +
+ es->rx_multicast +
+ es->rx_broadcast);
+ IXL_SET_OPACKETS(vsi, es->tx_unicast +
+ es->tx_multicast +
+ es->tx_broadcast);
+ IXL_SET_IBYTES(vsi, es->rx_bytes);
+ IXL_SET_OBYTES(vsi, es->tx_bytes);
+ IXL_SET_IMCASTS(vsi, es->rx_multicast);
+ IXL_SET_OMCASTS(vsi, es->tx_multicast);
+
+ IXL_SET_IERRORS(vsi, nsd->crc_errors + nsd->illegal_bytes +
+ nsd->rx_undersize + nsd->rx_oversize + nsd->rx_fragments +
+ nsd->rx_jabber);
+ IXL_SET_OERRORS(vsi, es->tx_errors);
+ IXL_SET_IQDROPS(vsi, es->rx_discards + nsd->eth.rx_discards);
+ IXL_SET_OQDROPS(vsi, tx_discards);
+ IXL_SET_NOPROTO(vsi, es->rx_unknown_protocol);
+ IXL_SET_COLLISIONS(vsi, 0);
+}
+
+/**
+ * Reset all of the stats for the given pf
+ **/
+void
+ixl_pf_reset_stats(struct ixl_pf *pf)
+{
+ bzero(&pf->stats, sizeof(struct i40e_hw_port_stats));
+ bzero(&pf->stats_offsets, sizeof(struct i40e_hw_port_stats));
+ pf->stat_offsets_loaded = false;
+}
+
+/**
+ * Resets all stats of the given vsi
+ **/
+void
+ixl_vsi_reset_stats(struct ixl_vsi *vsi)
+{
+ bzero(&vsi->eth_stats, sizeof(struct i40e_eth_stats));
+ bzero(&vsi->eth_stats_offsets, sizeof(struct i40e_eth_stats));
+ vsi->stat_offsets_loaded = false;
+}
+
+/**
+ * Read and update a 48 bit stat from the hw
+ *
+ * Since the device stats are not reset at PFReset, they likely will not
+ * be zeroed when the driver starts. We'll save the first values read
+ * and use them as offsets to be subtracted from the raw values in order
+ * to report stats that count from zero.
+ **/
+void
+ixl_stat_update48(struct i40e_hw *hw, u32 hireg, u32 loreg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u64 new_data;
+
+#if defined(__FreeBSD__) && (__FreeBSD_version >= 1000000) && defined(__amd64__)
+ new_data = rd64(hw, loreg);
+#else
+ /*
+ * Use two rd32's instead of one rd64; FreeBSD versions before
+ * 10 don't support 64-bit bus reads/writes.
+ */
+ new_data = rd32(hw, loreg);
+ new_data |= ((u64)(rd32(hw, hireg) & 0xFFFF)) << 32;
+#endif
+
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = new_data - *offset;
+ else
+ *stat = (new_data + ((u64)1 << 48)) - *offset;
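+	/*
+	 * Wrap example (illustrative): if the counter wrapped so that
+	 * new_data = 0x5 while *offset = 0xFFFFFFFFFFF0, then
+	 * (0x5 + 2^48) - 0xFFFFFFFFFFF0 = 0x15, the true delta.
+	 */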
+ *stat &= 0xFFFFFFFFFFFFULL;
+}
+
+/**
+ * Read and update a 32 bit stat from the hw
+ **/
+void
+ixl_stat_update32(struct i40e_hw *hw, u32 reg,
+ bool offset_loaded, u64 *offset, u64 *stat)
+{
+ u32 new_data;
+
+ new_data = rd32(hw, reg);
+ if (!offset_loaded)
+ *offset = new_data;
+ if (new_data >= *offset)
+ *stat = (u32)(new_data - *offset);
+ else
+ *stat = (u32)((new_data + ((u64)1 << 32)) - *offset);
+}
+
+void
+ixl_add_device_sysctls(struct ixl_pf *pf)
+{
+ device_t dev = pf->dev;
+
+ struct sysctl_ctx_list *ctx = device_get_sysctl_ctx(dev);
+ struct sysctl_oid_list *ctx_list =
+ SYSCTL_CHILDREN(device_get_sysctl_tree(dev));
+
+ struct sysctl_oid *debug_node;
+ struct sysctl_oid_list *debug_list;
+
+ /* Set up sysctls */
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fc", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_flowcntl, "I", IXL_SYSCTL_HELP_FC);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "advertise_speed", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_set_advertise, "I", IXL_SYSCTL_HELP_SET_ADVERTISE);
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "current_speed", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_current_speed, "A", "Current Port Speed");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "fw_version", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_show_fw, "A", "Firmware version");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "unallocated_queues", CTLTYPE_INT | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_unallocated_queues, "I",
+ "Queues not allocated to a PF or VF");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "tx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_pf_tx_itr, "I",
+ "Immediately set TX ITR value for all queues");
+
+ SYSCTL_ADD_PROC(ctx, ctx_list,
+ OID_AUTO, "rx_itr", CTLTYPE_INT | CTLFLAG_RW,
+ pf, 0, ixl_sysctl_pf_rx_itr, "I",
+ "Immediately set RX ITR value for all queues");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "dynamic_rx_itr", CTLFLAG_RW,
+ &pf->dynamic_rx_itr, 0, "Enable dynamic RX ITR");
+
+ SYSCTL_ADD_INT(ctx, ctx_list,
+ OID_AUTO, "dynamic_tx_itr", CTLFLAG_RW,
+ &pf->dynamic_tx_itr, 0, "Enable dynamic TX ITR");
+
+ /* Add sysctls meant to print debug information, but don't list them
+ * in "sysctl -a" output. */
+ debug_node = SYSCTL_ADD_NODE(ctx, ctx_list,
+ OID_AUTO, "debug", CTLFLAG_RD | CTLFLAG_SKIP, NULL, "Debug Sysctls");
+ debug_list = SYSCTL_CHILDREN(debug_node);
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "shared_debug_mask", CTLFLAG_RW,
+ &pf->hw.debug_mask, 0, "Shared code debug message level");
+
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "core_debug_mask", CTLFLAG_RW,
+ &pf->dbg_mask, 0, "Non-shared code debug message level");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "link_status", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_link_status, "A", IXL_SYSCTL_HELP_LINK_STATUS);
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "phy_abilities", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_phy_abilities, "A", "PHY Abilities");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "filter_list", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_sw_filter_list, "A", "SW Filter List");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "hw_res_alloc", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hw_res_alloc, "A", "HW Resource Allocation");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "switch_config", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_switch_config, "A", "HW Switch Configuration");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "rss_key", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hkey, "A", "View RSS key");
+
+ SYSCTL_ADD_PROC(ctx, debug_list,
+ OID_AUTO, "rss_lut", CTLTYPE_STRING | CTLFLAG_RD,
+ pf, 0, ixl_sysctl_hlut, "A", "View RSS lookup table");
+#ifdef PCI_IOV
+ SYSCTL_ADD_UINT(ctx, debug_list,
+ OID_AUTO, "vc_debug_level", CTLFLAG_RW, &pf->vc_debug_lvl,
+ 0, "PF/VF Virtual Channel debug level");
+#endif
+}
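+
+/*
+ * Usage sketch for the sysctls above (unit number hypothetical):
+ *
+ * # sysctl dev.ixl.0.fc=3 (enable full flow control)
+ * # sysctl dev.ixl.0.advertise_speed=0x6 (advertise 1G and 10G)
+ * # sysctl dev.ixl.0.current_speed (report the negotiated link speed)
+ */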
+
+/*
+ * Primarily for finding out how many queues can be assigned to VFs,
+ * at runtime.
+ */
+static int
+ixl_sysctl_unallocated_queues(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ int queues;
+
+ IXL_PF_LOCK(pf);
+ queues = (int)ixl_pf_qmgr_get_num_free(&pf->qmgr);
+ IXL_PF_UNLOCK(pf);
+
+ return sysctl_handle_int(oidp, NULL, queues, req);
+}
+
+/*
+** Set flow control using sysctl:
+** 0 - off
+** 1 - rx pause
+** 2 - tx pause
+** 3 - full
+*/
+int
+ixl_set_flowcntl(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_fc, error = 0;
+ enum i40e_status_code aq_error = 0;
+ u8 fc_aq_err = 0;
+
+ /* Get request */
+ requested_fc = pf->fc;
+ error = sysctl_handle_int(oidp, &requested_fc, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ if (requested_fc < 0 || requested_fc > 3) {
+ device_printf(dev,
+ "Invalid fc mode; valid modes are 0 through 3\n");
+ return (EINVAL);
+ }
+
+ /* Set fc ability for port */
+ hw->fc.requested_mode = requested_fc;
+ aq_error = i40e_set_fc(hw, &fc_aq_err, TRUE);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new fc mode %d; fc_err %#x\n",
+ __func__, aq_error, fc_aq_err);
+ return (EIO);
+ }
+ pf->fc = requested_fc;
+
+ /* Get new link state */
+ i40e_msec_delay(250);
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+
+ return (0);
+}
+
+int
+ixl_current_speed(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ int error = 0, index = 0;
+
+ char *speeds[] = {
+ "Unknown",
+ "100M",
+ "1G",
+ "10G",
+ "40G",
+ "20G"
+ };
+
+ ixl_update_link_status(pf);
+
+ switch (hw->phy.link_info.link_speed) {
+ case I40E_LINK_SPEED_100MB:
+ index = 1;
+ break;
+ case I40E_LINK_SPEED_1GB:
+ index = 2;
+ break;
+ case I40E_LINK_SPEED_10GB:
+ index = 3;
+ break;
+ case I40E_LINK_SPEED_40GB:
+ index = 4;
+ break;
+ case I40E_LINK_SPEED_20GB:
+ index = 5;
+ break;
+ case I40E_LINK_SPEED_UNKNOWN:
+ default:
+ index = 0;
+ break;
+ }
+
+ error = sysctl_handle_string(oidp, speeds[index],
+ strlen(speeds[index]), req);
+ return (error);
+}
+
+int
+ixl_set_advertised_speeds(struct ixl_pf *pf, int speeds)
+{
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct i40e_aq_set_phy_config config;
+ enum i40e_status_code aq_error = 0;
+
+ /* Get current capability information */
+ aq_error = i40e_aq_get_phy_capabilities(hw,
+ FALSE, FALSE, &abilities, NULL);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error getting phy capabilities %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EIO);
+ }
+
+ /* Prepare new config */
+ bzero(&config, sizeof(config));
+ config.phy_type = abilities.phy_type;
+ config.abilities = abilities.abilities
+ | I40E_AQ_PHY_ENABLE_ATOMIC_LINK;
+ config.eee_capability = abilities.eee_capability;
+ config.eeer = abilities.eeer_val;
+ config.low_power_ctrl = abilities.d3_lpan;
+ /* Translate into aq cmd link_speed */
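+ /* e.g. speeds = 0x6 requests 1G | 10G */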
+ if (speeds & 0x10)
+ config.link_speed |= I40E_LINK_SPEED_40GB;
+ if (speeds & 0x8)
+ config.link_speed |= I40E_LINK_SPEED_20GB;
+ if (speeds & 0x4)
+ config.link_speed |= I40E_LINK_SPEED_10GB;
+ if (speeds & 0x2)
+ config.link_speed |= I40E_LINK_SPEED_1GB;
+ if (speeds & 0x1)
+ config.link_speed |= I40E_LINK_SPEED_100MB;
+
+ /* Do aq command & restart link */
+ aq_error = i40e_aq_set_phy_config(hw, &config, NULL);
+ if (aq_error) {
+ device_printf(dev,
+ "%s: Error setting new phy config %d,"
+ " aq error: %d\n", __func__, aq_error,
+ hw->aq.asq_last_status);
+ return (EAGAIN);
+ }
+
+ /*
+ ** This seems a bit heavy handed, but we
+ ** need to get a reinit on some devices
+ */
+ IXL_PF_LOCK(pf);
+ ixl_stop_locked(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+
+ return (0);
+}
+
+/*
+** Control link advertise speed:
+** Flags:
+** 0x1 - advertise 100 Mb
+** 0x2 - advertise 1G
+** 0x4 - advertise 10G
+** 0x8 - advertise 20G
+** 0x10 - advertise 40G
+**
+** Set to 0 to disable link
+*/
+int
+ixl_set_advertise(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ int requested_ls = 0;
+ int error = 0;
+
+ /* Read in new mode */
+ requested_ls = pf->advertised_speed;
+ error = sysctl_handle_int(oidp, &requested_ls, 0, req);
+ if ((error) || (req->newptr == NULL))
+ return (error);
+ /* Check for sane value */
+ if (requested_ls > 0x10) {
+ device_printf(dev, "Invalid advertised speed; "
+ "valid modes are 0x1 through 0x10\n");
+ return (EINVAL);
+ }
+ /* Then check for validity based on adapter type */
+ switch (hw->device_id) {
+ case I40E_DEV_ID_1G_BASE_T_X722:
+ /* 1G BaseT */
+ if (requested_ls & ~(0x2)) {
+ device_printf(dev,
+ "Only 1G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_10G_BASE_T:
+ case I40E_DEV_ID_10G_BASE_T4:
+ /* 10G BaseT */
+ if (requested_ls & ~(0x7)) {
+ device_printf(dev,
+ "Only 100M/1G/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_20G_KR2:
+ case I40E_DEV_ID_20G_KR2_A:
+ /* 20G */
+ if (requested_ls & ~(0xE)) {
+ device_printf(dev,
+ "Only 1G/10G/20G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ case I40E_DEV_ID_KX_B:
+ case I40E_DEV_ID_QSFP_A:
+ case I40E_DEV_ID_QSFP_B:
+ /* 40G */
+ if (requested_ls & ~(0x10)) {
+ device_printf(dev,
+ "Only 40G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ default:
+ /* 10G (1G) */
+ if (requested_ls & ~(0x6)) {
+ device_printf(dev,
+ "Only 1/10G speeds supported on this device.\n");
+ return (EINVAL);
+ }
+ break;
+ }
+
+ /* Exit if no change */
+ if (pf->advertised_speed == requested_ls)
+ return (0);
+
+ error = ixl_set_advertised_speeds(pf, requested_ls);
+ if (error)
+ return (error);
+
+ pf->advertised_speed = requested_ls;
+ ixl_update_link_status(pf);
+ return (0);
+}
+
+/*
+** Get the width and transaction speed of
+** the bus this adapter is plugged into.
+*/
+void
+ixl_get_bus_info(struct i40e_hw *hw, device_t dev)
+{
+ u16 link;
+ u32 offset;
+
+ /* Some devices don't use PCIE */
+ if (hw->mac.type == I40E_MAC_X722)
+ return;
+
+ /* Read PCI Express Capabilities Link Status Register */
+ pci_find_cap(dev, PCIY_EXPRESS, &offset);
+ link = pci_read_config(dev, offset + PCIER_LINK_STA, 2);
+
+ /* Fill out hw struct with PCIE info */
+ i40e_set_pci_config_data(hw, link);
+
+ /* Use info to print out bandwidth messages */
+ device_printf(dev,"PCI Express Bus: Speed %s %s\n",
+ ((hw->bus.speed == i40e_bus_speed_8000) ? "8.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_5000) ? "5.0GT/s":
+ (hw->bus.speed == i40e_bus_speed_2500) ? "2.5GT/s":"Unknown"),
+ (hw->bus.width == i40e_bus_width_pcie_x8) ? "Width x8" :
+ (hw->bus.width == i40e_bus_width_pcie_x4) ? "Width x4" :
+ (hw->bus.width == i40e_bus_width_pcie_x1) ? "Width x1" :
+ ("Unknown"));
+
+ if ((hw->bus.width < i40e_bus_width_pcie_x8) ||
+ (hw->bus.speed < i40e_bus_speed_8000)) {
+ device_printf(dev, "PCI-Express bandwidth available"
+ " for this device may be insufficient for"
+ " optimal performance.\n");
+ device_printf(dev, "For optimal performance, a x8 "
+ "PCIE Gen3 slot is required.\n");
+ }
+}
+
+static int
+ixl_sysctl_show_fw(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ struct sbuf *sbuf;
+
+ sbuf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ ixl_nvm_version_str(hw, sbuf);
+ sbuf_finish(sbuf);
+ sbuf_delete(sbuf);
+
+ return 0;
+}
+
+void
+ixl_print_nvm_cmd(device_t dev, struct i40e_nvm_access *nvma)
+{
+ if ((nvma->command == I40E_NVM_READ) &&
+ ((nvma->config & 0xFF) == 0xF) &&
+ (((nvma->config & 0xF00) >> 8) == 0xF) &&
+ (nvma->offset == 0) &&
+ (nvma->data_size == 1)) {
+ /* Get Driver Status command; too common to be worth logging */
+ }
+ else if (nvma->command == I40E_NVM_READ) {
+ /* Plain reads are not logged, either */
+ }
+ else {
+ switch (nvma->command) {
+ case 0xB:
+ device_printf(dev, "- command: I40E_NVM_READ\n");
+ break;
+ case 0xC:
+ device_printf(dev, "- command: I40E_NVM_WRITE\n");
+ break;
+ default:
+ device_printf(dev, "- command: unknown 0x%08x\n", nvma->command);
+ break;
+ }
+
+ device_printf(dev, "- config (ptr) : 0x%02x\n", nvma->config & 0xFF);
+ device_printf(dev, "- config (flags): 0x%01x\n", (nvma->config & 0xF00) >> 8);
+ device_printf(dev, "- offset : 0x%08x\n", nvma->offset);
+ device_printf(dev, "- data_s : 0x%08x\n", nvma->data_size);
+ }
+}
+
+int
+ixl_handle_nvmupd_cmd(struct ixl_pf *pf, struct ifdrv *ifd)
+{
+ struct i40e_hw *hw = &pf->hw;
+ struct i40e_nvm_access *nvma;
+ device_t dev = pf->dev;
+ enum i40e_status_code status = 0;
+ int perrno;
+
+ DEBUGFUNC("ixl_handle_nvmupd_cmd");
+
+ /* Sanity checks */
+ if (ifd->ifd_len < sizeof(struct i40e_nvm_access) ||
+ ifd->ifd_data == NULL) {
+ device_printf(dev, "%s: incorrect ifdrv length or data pointer\n",
+ __func__);
+ device_printf(dev, "%s: ifdrv length: %lu, sizeof(struct i40e_nvm_access): %lu\n",
+ __func__, ifd->ifd_len, sizeof(struct i40e_nvm_access));
+ device_printf(dev, "%s: data pointer: %p\n", __func__,
+ ifd->ifd_data);
+ return (EINVAL);
+ }
+
+ nvma = (struct i40e_nvm_access *)ifd->ifd_data;
+
+ if (pf->dbg_mask & IXL_DBG_NVMUPD)
+ ixl_print_nvm_cmd(dev, nvma);
+
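+ /* If an EMP reset is in progress, wait up to 10 s (100 x 100 ms) for it to finish */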
+ if (pf->state & IXL_PF_STATE_EMPR_RESETTING) {
+ int count = 0;
+ while (count++ < 100) {
+ i40e_msec_delay(100);
+ if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING))
+ break;
+ }
+ }
+
+ if (!(pf->state & IXL_PF_STATE_EMPR_RESETTING)) {
+ IXL_PF_LOCK(pf);
+ status = i40e_nvmupd_command(hw, nvma, nvma->data, &perrno);
+ IXL_PF_UNLOCK(pf);
+ } else {
+ perrno = -EBUSY;
+ }
+
+ if (status)
+ device_printf(dev, "i40e_nvmupd_command status %d, perrno %d\n",
+ status, perrno);
+
+ /*
+ * In the kernel, -EPERM (-1) has the same value as ERESTART, which
+ * the kernel interprets as a request to run this ioctl again. So
+ * return -EACCES for -EPERM instead.
+ */
+ if (perrno == -EPERM)
+ return (-EACCES);
+ else
+ return (perrno);
+}
+
+/*********************************************************************
+ *
+ * Media Ioctl callback
+ *
+ * This routine is called whenever the user queries the status of
+ * the interface using ifconfig.
+ *
+ **********************************************************************/
+void
+ixl_media_status(struct ifnet * ifp, struct ifmediareq * ifmr)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = vsi->back;
+ struct i40e_hw *hw = &pf->hw;
+
+ INIT_DEBUGOUT("ixl_media_status: begin");
+ IXL_PF_LOCK(pf);
+
+ hw->phy.get_link_info = TRUE;
+ i40e_get_link_status(hw, &pf->link_up);
+ ixl_update_link_status(pf);
+
+ ifmr->ifm_status = IFM_AVALID;
+ ifmr->ifm_active = IFM_ETHER;
+
+ if (!pf->link_up) {
+ IXL_PF_UNLOCK(pf);
+ return;
+ }
+
+ ifmr->ifm_status |= IFM_ACTIVE;
+
+ /* Hardware always does full-duplex */
+ ifmr->ifm_active |= IFM_FDX;
+
+ switch (hw->phy.link_info.phy_type) {
+ /* 100 M */
+ case I40E_PHY_TYPE_100BASE_TX:
+ ifmr->ifm_active |= IFM_100_TX;
+ break;
+ /* 1 G */
+ case I40E_PHY_TYPE_1000BASE_T:
+ ifmr->ifm_active |= IFM_1000_T;
+ break;
+ case I40E_PHY_TYPE_1000BASE_SX:
+ ifmr->ifm_active |= IFM_1000_SX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_LX:
+ ifmr->ifm_active |= IFM_1000_LX;
+ break;
+ case I40E_PHY_TYPE_1000BASE_T_OPTICAL:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ /* 10 G */
+ case I40E_PHY_TYPE_10GBASE_SFPP_CU:
+ ifmr->ifm_active |= IFM_10G_TWINAX;
+ break;
+ case I40E_PHY_TYPE_10GBASE_SR:
+ ifmr->ifm_active |= IFM_10G_SR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_LR:
+ ifmr->ifm_active |= IFM_10G_LR;
+ break;
+ case I40E_PHY_TYPE_10GBASE_T:
+ ifmr->ifm_active |= IFM_10G_T;
+ break;
+ case I40E_PHY_TYPE_XAUI:
+ case I40E_PHY_TYPE_XFI:
+ case I40E_PHY_TYPE_10GBASE_AOC:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ /* 40 G */
+ case I40E_PHY_TYPE_40GBASE_CR4:
+ case I40E_PHY_TYPE_40GBASE_CR4_CU:
+ ifmr->ifm_active |= IFM_40G_CR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_SR4:
+ ifmr->ifm_active |= IFM_40G_SR4;
+ break;
+ case I40E_PHY_TYPE_40GBASE_LR4:
+ ifmr->ifm_active |= IFM_40G_LR4;
+ break;
+ case I40E_PHY_TYPE_XLAUI:
+ ifmr->ifm_active |= IFM_OTHER;
+ break;
+ case I40E_PHY_TYPE_1000BASE_KX:
+ ifmr->ifm_active |= IFM_1000_KX;
+ break;
+ case I40E_PHY_TYPE_SGMII:
+ ifmr->ifm_active |= IFM_1000_SGMII;
+ break;
+ /* Copper and non-copper 10GBASE-CR1 variants both map to IFM_10G_CR1 */
+ case I40E_PHY_TYPE_10GBASE_CR1_CU:
+ case I40E_PHY_TYPE_10GBASE_CR1:
+ ifmr->ifm_active |= IFM_10G_CR1;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KX4:
+ ifmr->ifm_active |= IFM_10G_KX4;
+ break;
+ case I40E_PHY_TYPE_10GBASE_KR:
+ ifmr->ifm_active |= IFM_10G_KR;
+ break;
+ case I40E_PHY_TYPE_SFI:
+ ifmr->ifm_active |= IFM_10G_SFI;
+ break;
+ /* Our single 20G media type */
+ case I40E_PHY_TYPE_20GBASE_KR2:
+ ifmr->ifm_active |= IFM_20G_KR2;
+ break;
+ case I40E_PHY_TYPE_40GBASE_KR4:
+ ifmr->ifm_active |= IFM_40G_KR4;
+ break;
+ case I40E_PHY_TYPE_XLPPI:
+ case I40E_PHY_TYPE_40GBASE_AOC:
+ ifmr->ifm_active |= IFM_40G_XLPPI;
+ break;
+ /* Unknown to driver */
+ default:
+ ifmr->ifm_active |= IFM_UNKNOWN;
+ break;
+ }
+ /* Report flow control status as well */
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_TX)
+ ifmr->ifm_active |= IFM_ETH_TXPAUSE;
+ if (hw->phy.link_info.an_info & I40E_AQ_LINK_PAUSE_RX)
+ ifmr->ifm_active |= IFM_ETH_RXPAUSE;
+
+ IXL_PF_UNLOCK(pf);
+}
+
+void
+ixl_init(void *arg)
+{
+ struct ixl_pf *pf = arg;
+ struct ixl_vsi *vsi = &pf->vsi;
+ device_t dev = pf->dev;
+ int error = 0;
+
+ /*
+ * If the aq is dead here, it probably means something outside of the driver
+ * did something to the adapter, like a PF reset.
+ * So rebuild the driver's state here if that occurs.
+ */
+ if (!i40e_check_asq_alive(&pf->hw)) {
+ device_printf(dev, "Admin Queue is down; resetting...\n");
+ IXL_PF_LOCK(pf);
+ ixl_teardown_hw_structs(pf);
+ ixl_reset(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+
+ /*
+ * Set up LAN queue interrupts here.
+ * Kernel interrupt setup functions cannot be called while holding a lock,
+ * so this is done outside of init_locked().
+ */
+ if (pf->msix > 1) {
+ /* Teardown existing interrupts, if they exist */
+ ixl_teardown_queue_msix(vsi);
+ ixl_free_queue_tqs(vsi);
+ /* Then set them up again */
+ error = ixl_setup_queue_msix(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_msix() error: %d\n",
+ error);
+ error = ixl_setup_queue_tqs(vsi);
+ if (error)
+ device_printf(dev, "ixl_setup_queue_tqs() error: %d\n",
+ error);
+ } else
+ /* XXX: The legacy interrupt setup path is possibly broken */
+ error = ixl_assign_vsi_legacy(pf);
+ if (error) {
+ device_printf(pf->dev, "assign_vsi_msix/legacy error: %d\n", error);
+ return;
+ }
+
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+}
+
+/*
+ * NOTE: Fortville does not support forcing media speeds. Instead,
+ * use the set_advertise sysctl to set the speeds Fortville
+ * will advertise or be allowed to operate at.
+ */
+int
+ixl_media_change(struct ifnet * ifp)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ifmedia *ifm = &vsi->media;
+
+ INIT_DEBUGOUT("ixl_media_change: begin");
+
+ if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
+ return (EINVAL);
+
+ if_printf(ifp, "Use 'advertise_speed' sysctl to change advertised speeds\n");
+
+ return (ENODEV);
+}
+
+/*********************************************************************
+ * Ioctl entry point
+ *
+ * ixl_ioctl is called when the user wants to configure the
+ * interface.
+ *
+ * return 0 on success, positive on failure
+ **********************************************************************/
+
+int
+ixl_ioctl(struct ifnet * ifp, u_long command, caddr_t data)
+{
+ struct ixl_vsi *vsi = ifp->if_softc;
+ struct ixl_pf *pf = vsi->back;
+ struct ifreq *ifr = (struct ifreq *)data;
+ struct ifdrv *ifd = (struct ifdrv *)data;
+#if defined(INET) || defined(INET6)
+ struct ifaddr *ifa = (struct ifaddr *)data;
+ bool avoid_reset = FALSE;
+#endif
+ int error = 0;
+
+ switch (command) {
+
+ case SIOCSIFADDR:
+#ifdef INET
+ if (ifa->ifa_addr->sa_family == AF_INET)
+ avoid_reset = TRUE;
+#endif
+#ifdef INET6
+ if (ifa->ifa_addr->sa_family == AF_INET6)
+ avoid_reset = TRUE;
+#endif
+#if defined(INET) || defined(INET6)
+ /*
+ ** Calling init results in link renegotiation,
+ ** so we avoid doing it when possible.
+ */
+ if (avoid_reset) {
+ ifp->if_flags |= IFF_UP;
+ if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
+ ixl_init(pf);
+#ifdef INET
+ if (!(ifp->if_flags & IFF_NOARP))
+ arp_ifinit(ifp, ifa);
+#endif
+ } else
+ error = ether_ioctl(ifp, command, data);
+ break;
+#endif
+ case SIOCSIFMTU:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFMTU (Set Interface MTU)");
+ if (ifr->ifr_mtu > IXL_MAX_FRAME -
+ ETHER_HDR_LEN - ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN) {
+ error = EINVAL;
+ } else {
+ IXL_PF_LOCK(pf);
+ ifp->if_mtu = ifr->ifr_mtu;
+ vsi->max_frame_size =
+ ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN
+ + ETHER_VLAN_ENCAP_LEN;
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFFLAGS:
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFFLAGS (Set Interface Flags)");
+ IXL_PF_LOCK(pf);
+ if (ifp->if_flags & IFF_UP) {
+ if ((ifp->if_drv_flags & IFF_DRV_RUNNING)) {
+ if ((ifp->if_flags ^ pf->if_flags) &
+ (IFF_PROMISC | IFF_ALLMULTI)) {
+ ixl_set_promisc(vsi);
+ }
+ } else {
+ IXL_PF_UNLOCK(pf);
+ ixl_init(pf);
+ IXL_PF_LOCK(pf);
+ }
+ } else {
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_UNLOCK(pf);
+ ixl_stop(pf);
+ IXL_PF_LOCK(pf);
+ }
+ }
+ pf->if_flags = ifp->if_flags;
+ IXL_PF_UNLOCK(pf);
+ break;
+ case SIOCSDRVSPEC:
+ case SIOCGDRVSPEC:
+ IOCTL_DEBUGOUT("ioctl: SIOCxDRVSPEC (Get/Set Driver-specific "
+ "Info)\n");
+
+ /* NVM update command */
+ if (ifd->ifd_cmd == I40E_NVM_ACCESS)
+ error = ixl_handle_nvmupd_cmd(pf, ifd);
+ else
+ error = EINVAL;
+ break;
+ case SIOCADDMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCADDMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_add_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCDELMULTI:
+ IOCTL_DEBUGOUT("ioctl: SIOCDELMULTI");
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_disable_intr(vsi);
+ ixl_del_multi(vsi);
+ ixl_enable_intr(vsi);
+ IXL_PF_UNLOCK(pf);
+ }
+ break;
+ case SIOCSIFMEDIA:
+ case SIOCGIFMEDIA:
+ case SIOCGIFXMEDIA:
+ IOCTL_DEBUGOUT("ioctl: SIOCxIFMEDIA (Get/Set Interface Media)");
+ error = ifmedia_ioctl(ifp, ifr, &vsi->media, command);
+ break;
+ case SIOCSIFCAP:
+ {
+ int mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ IOCTL_DEBUGOUT("ioctl: SIOCSIFCAP (Set Capabilities)");
+
+ ixl_cap_txcsum_tso(vsi, ifp, mask);
+
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_RXCSUM_IPV6)
+ ifp->if_capenable ^= IFCAP_RXCSUM_IPV6;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & IFCAP_VLAN_HWTAGGING)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTAGGING;
+ if (mask & IFCAP_VLAN_HWFILTER)
+ ifp->if_capenable ^= IFCAP_VLAN_HWFILTER;
+ if (mask & IFCAP_VLAN_HWTSO)
+ ifp->if_capenable ^= IFCAP_VLAN_HWTSO;
+ if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
+ IXL_PF_LOCK(pf);
+ ixl_init_locked(pf);
+ IXL_PF_UNLOCK(pf);
+ }
+ VLAN_CAPABILITIES(ifp);
+
+ break;
+ }
+
+ default:
+ IOCTL_DEBUGOUT("ioctl: UNKNOWN (0x%X)\n", (int)command);
+ error = ether_ioctl(ifp, command, data);
+ break;
+ }
+
+ return (error);
+}
+
+static char *
+ixl_phy_type_string(u32 bit_pos)
+{
+ static char * phy_types_str[32] = {
+ "SGMII",
+ "1000BASE-KX",
+ "10GBASE-KX4",
+ "10GBASE-KR",
+ "40GBASE-KR4",
+ "XAUI",
+ "XFI",
+ "SFI",
+ "XLAUI",
+ "XLPPI",
+ "40GBASE-CR4",
+ "10GBASE-CR1",
+ "Reserved (12)",
+ "Reserved (13)",
+ "Reserved (14)",
+ "Reserved (15)",
+ "Reserved (16)",
+ "100BASE-TX",
+ "1000BASE-T",
+ "10GBASE-T",
+ "10GBASE-SR",
+ "10GBASE-LR",
+ "10GBASE-SFP+Cu",
+ "10GBASE-CR1",
+ "40GBASE-CR4",
+ "40GBASE-SR4",
+ "40GBASE-LR4",
+ "1000BASE-SX",
+ "1000BASE-LX",
+ "1000BASE-T Optical",
+ "20GBASE-KR2",
+ "Reserved (31)"
+ };
+
+ if (bit_pos > 31) return "Invalid";
+ return phy_types_str[bit_pos];
+}
+
+
+static int
+ixl_sysctl_link_status(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct i40e_link_status link_status;
+ enum i40e_status_code status;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_link_info(hw, true, &link_status, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_link_info() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return (EIO);
+ }
+
+ sbuf_printf(buf, "\n"
+ "PHY Type : 0x%02x<%s>\n"
+ "Speed : 0x%02x\n"
+ "Link info: 0x%02x\n"
+ "AN info : 0x%02x\n"
+ "Ext info : 0x%02x\n"
+ "Max Frame: %d\n"
+ "Pacing : 0x%02x\n"
+ "CRC En? : %s\n",
+ link_status.phy_type, ixl_phy_type_string(link_status.phy_type),
+ link_status.link_speed,
+ link_status.link_info, link_status.an_info,
+ link_status.ext_info, link_status.max_frame_size,
+ link_status.pacing,
+ (link_status.crc_enable) ? "Yes" : "No");
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+static int
+ixl_sysctl_phy_abilities(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ enum i40e_status_code status;
+ struct i40e_aq_get_phy_abilities_resp abilities;
+ struct sbuf *buf;
+ int error = 0;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_phy_capabilities(hw,
+ TRUE, FALSE, &abilities, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: i40e_aq_get_phy_capabilities() status %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return (EIO);
+ }
+
+ sbuf_printf(buf, "\n"
+ "PHY Type : %08x",
+ abilities.phy_type);
+
+ if (abilities.phy_type != 0) {
+ sbuf_printf(buf, "<");
+ for (int i = 0; i < 32; i++)
+ if ((1 << i) & abilities.phy_type)
+ sbuf_printf(buf, "%s,", ixl_phy_type_string(i));
+ sbuf_printf(buf, ">\n");
+ }
+
+ sbuf_printf(buf,
+ "Speed : %02x\n"
+ "Abilities: %02x\n"
+ "EEE cap : %04x\n"
+ "EEER reg : %08x\n"
+ "D3 Lpan : %02x\n"
+ "ID : %02x %02x %02x %02x\n"
+ "ModType : %02x %02x %02x",
+ abilities.link_speed,
+ abilities.abilities, abilities.eee_capability,
+ abilities.eeer_val, abilities.d3_lpan,
+ abilities.phy_id[0], abilities.phy_id[1],
+ abilities.phy_id[2], abilities.phy_id[3],
+ abilities.module_type[0], abilities.module_type[1],
+ abilities.module_type[2]);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+static int
+ixl_sysctl_sw_filter_list(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct ixl_vsi *vsi = &pf->vsi;
+ struct ixl_mac_filter *f;
+ char *buf, *buf_i;
+
+ int error = 0;
+ int ftl_len = 0;
+ int ftl_counter = 0;
+ int buf_len = 0;
+ int entry_len = 42;
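+ /* entry_len: a 17-character MAC address plus ", vlan %4d" (11) and ", flags %#06x" (14) */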
+
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ ftl_len++;
+ }
+
+ if (ftl_len < 1) {
+ sysctl_handle_string(oidp, "(none)", 6, req);
+ return (0);
+ }
+
+ buf_len = sizeof(char) * (entry_len + 1) * ftl_len + 2;
+ buf = buf_i = malloc(buf_len, M_DEVBUF, M_NOWAIT);
+ if (buf == NULL)
+ return (ENOMEM);
+
+ sprintf(buf_i++, "\n");
+ SLIST_FOREACH(f, &vsi->ftl, next) {
+ sprintf(buf_i,
+ MAC_FORMAT ", vlan %4d, flags %#06x",
+ MAC_FORMAT_ARGS(f->macaddr), f->vlan, f->flags);
+ buf_i += entry_len;
+ /* don't print '\n' for last entry */
+ if (++ftl_counter != ftl_len) {
+ sprintf(buf_i, "\n");
+ buf_i++;
+ }
+ }
+
+ error = sysctl_handle_string(oidp, buf, strlen(buf), req);
+ if (error)
+ printf("sysctl error: %d\n", error);
+ free(buf, M_DEVBUF);
+ return error;
+}
+
+#define IXL_SW_RES_SIZE 0x14
+int
+ixl_res_alloc_cmp(const void *a, const void *b)
+{
+ const struct i40e_aqc_switch_resource_alloc_element_resp *one, *two;
+ one = (const struct i40e_aqc_switch_resource_alloc_element_resp *)a;
+ two = (const struct i40e_aqc_switch_resource_alloc_element_resp *)b;
+
+ return ((int)one->resource_type - (int)two->resource_type);
+}
+
+/*
+ * Longest string length: 25
+ */
+char *
+ixl_switch_res_type_string(u8 type)
+{
+ char * ixl_switch_res_type_strings[0x14] = {
+ "VEB",
+ "VSI",
+ "Perfect Match MAC address",
+ "S-tag",
+ "(Reserved)",
+ "Multicast hash entry",
+ "Unicast hash entry",
+ "VLAN",
+ "VSI List entry",
+ "(Reserved)",
+ "VLAN Statistic Pool",
+ "Mirror Rule",
+ "Queue Set",
+ "Inner VLAN Forward filter",
+ "(Reserved)",
+ "Inner MAC",
+ "IP",
+ "GRE/VN1 Key",
+ "VN2 Key",
+ "Tunneling Port"
+ };
+
+ if (type < 0x14)
+ return ixl_switch_res_type_strings[type];
+ else
+ return "(Reserved)";
+}
+
+static int
+ixl_sysctl_hw_res_alloc(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ enum i40e_status_code status;
+ int error = 0;
+
+ u8 num_entries;
+ struct i40e_aqc_switch_resource_alloc_element_resp resp[IXL_SW_RES_SIZE];
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ bzero(resp, sizeof(resp));
+ status = i40e_aq_get_switch_resource_alloc(hw, &num_entries,
+ resp,
+ IXL_SW_RES_SIZE,
+ NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: get_switch_resource_alloc() error %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return (error);
+ }
+
+ /* Sort entries by type for display */
+ qsort(resp, num_entries,
+ sizeof(struct i40e_aqc_switch_resource_alloc_element_resp),
+ &ixl_res_alloc_cmp);
+
+ sbuf_cat(buf, "\n");
+ sbuf_printf(buf, "# of entries: %d\n", num_entries);
+ sbuf_printf(buf,
+ " Type | Guaranteed | Total | Used | Un-allocated\n"
+ " | (this) | (all) | (this) | (all) \n");
+ for (int i = 0; i < num_entries; i++) {
+ sbuf_printf(buf,
+ "%25s | %10d %5d %6d %12d",
+ ixl_switch_res_type_string(resp[i].resource_type),
+ resp[i].guaranteed,
+ resp[i].total,
+ resp[i].used,
+ resp[i].total_unalloced);
+ if (i < num_entries - 1)
+ sbuf_cat(buf, "\n");
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+ return (error);
+}
+
+/*
+** Caller must init and delete sbuf; this function will clear and
+** finish it for caller.
+**
+** XXX: Cannot use the SEID for this, since there is no longer a
+** fixed mapping between SEID and element type.
+*/
+char *
+ixl_switch_element_string(struct sbuf *s,
+ struct i40e_aqc_switch_config_element_resp *element)
+{
+ sbuf_clear(s);
+
+ switch (element->element_type) {
+ case I40E_AQ_SW_ELEM_TYPE_MAC:
+ sbuf_printf(s, "MAC %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PF:
+ sbuf_printf(s, "PF %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VF:
+ sbuf_printf(s, "VF %3d", element->element_info);
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_EMP:
+ sbuf_cat(s, "EMP");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_BMC:
+ sbuf_cat(s, "BMC");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PV:
+ sbuf_cat(s, "PV");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VEB:
+ sbuf_cat(s, "VEB");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_PA:
+ sbuf_cat(s, "PA");
+ break;
+ case I40E_AQ_SW_ELEM_TYPE_VSI:
+ sbuf_printf(s, "VSI %3d", element->element_info);
+ break;
+ default:
+ sbuf_cat(s, "?");
+ break;
+ }
+
+ sbuf_finish(s);
+ return sbuf_data(s);
+}
+
+static int
+ixl_sysctl_switch_config(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ struct sbuf *nmbuf;
+ enum i40e_status_code status;
+ int error = 0;
+ u16 next = 0;
+ u8 aq_buf[I40E_AQ_LARGE_BUF];
+
+ struct i40e_aqc_get_switch_config_resp *sw_config;
+ sw_config = (struct i40e_aqc_get_switch_config_resp *)aq_buf;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for sysctl output.\n");
+ return (ENOMEM);
+ }
+
+ status = i40e_aq_get_switch_config(hw, sw_config,
+ sizeof(aq_buf), &next, NULL);
+ if (status) {
+ device_printf(dev,
+ "%s: aq_get_switch_config() error %s, aq error %s\n",
+ __func__, i40e_stat_str(hw, status),
+ i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_delete(buf);
+ return error;
+ }
+ if (next)
+ device_printf(dev, "%s: TODO: get more config with SEID %d\n",
+ __func__, next);
+
+ nmbuf = sbuf_new_auto();
+ if (!nmbuf) {
+ device_printf(dev, "Could not allocate sbuf for name output.\n");
+ sbuf_delete(buf);
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ /* Assuming <= 255 elements in switch */
+ sbuf_printf(buf, "# of reported elements: %d\n", sw_config->header.num_reported);
+ sbuf_printf(buf, "total # of elements: %d\n", sw_config->header.num_total);
+ /* Exclude:
+ ** Revision -- all elements are revision 1 for now
+ */
+ sbuf_printf(buf,
+ "SEID ( Name ) | Uplink | Downlink | Conn Type\n"
+ " | | | (uplink)\n");
+ for (int i = 0; i < sw_config->header.num_reported; i++) {
+ // "%4d (%8s) | %8s %8s %#8x",
+ sbuf_printf(buf, "%4d", sw_config->element[i].seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "(%8s)", ixl_switch_element_string(nmbuf,
+ &sw_config->element[i]));
+ sbuf_cat(buf, " | ");
+ sbuf_printf(buf, "%8d", sw_config->element[i].uplink_seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%8d", sw_config->element[i].downlink_seid);
+ sbuf_cat(buf, " ");
+ sbuf_printf(buf, "%#8x", sw_config->element[i].connection_type);
+ if (i < sw_config->header.num_reported - 1)
+ sbuf_cat(buf, "\n");
+ }
+ sbuf_delete(nmbuf);
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+static int
+ixl_sysctl_hkey(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+ enum i40e_status_code status;
+ u32 reg;
+
+ struct i40e_aqc_get_set_rss_key_data key_data;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
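+ /* The X722 reads its RSS key through the admin queue; other MACs expose it in the HKEY registers */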
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(key_data.standard_rss_key, sizeof(key_data.standard_rss_key));
+ status = i40e_aq_get_rss_key(hw, pf->vsi.vsi_num, &key_data);
+ if (status)
+ device_printf(dev, "i40e_aq_get_rss_key status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_printf(buf, "%40D", (u_char *)key_data.standard_rss_key, "");
+ } else {
+ for (int i = 0; i < IXL_RSS_KEY_SIZE_REG; i++) {
+ reg = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
+ sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ }
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
+static int
+ixl_sysctl_hlut(SYSCTL_HANDLER_ARGS)
+{
+ struct ixl_pf *pf = (struct ixl_pf *)arg1;
+ struct i40e_hw *hw = &pf->hw;
+ device_t dev = pf->dev;
+ struct sbuf *buf;
+ int error = 0;
+ enum i40e_status_code status;
+ u8 hlut[512];
+ u32 reg;
+
+ buf = sbuf_new_for_sysctl(NULL, NULL, 128, req);
+ if (!buf) {
+ device_printf(dev, "Could not allocate sbuf for output.\n");
+ return (ENOMEM);
+ }
+
+ sbuf_cat(buf, "\n");
+ if (hw->mac.type == I40E_MAC_X722) {
+ bzero(hlut, sizeof(hlut));
+ status = i40e_aq_get_rss_lut(hw, pf->vsi.vsi_num, TRUE, hlut, sizeof(hlut));
+ if (status)
+ device_printf(dev, "i40e_aq_get_rss_lut status %s, error %s\n",
+ i40e_stat_str(hw, status), i40e_aq_str(hw, hw->aq.asq_last_status));
+ sbuf_printf(buf, "%512D", (u_char *)hlut, "");
+ } else {
+ for (int i = 0; i < hw->func_caps.rss_table_size >> 2; i++) {
+ reg = rd32(hw, I40E_PFQF_HLUT(i));
+ sbuf_printf(buf, "%4D", (u_char *)&reg, "");
+ }
+ }
+
+ error = sbuf_finish(buf);
+ if (error)
+ device_printf(dev, "Error finishing sbuf: %d\n", error);
+ sbuf_delete(buf);
+
+ return (error);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_qmgr.c b/sys/dev/ixl/ixl_pf_qmgr.c
new file mode 100644
index 000000000000..f2842e584dc4
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_qmgr.c
@@ -0,0 +1,308 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf_qmgr.h"
+
+static int ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num);
+
+int
+ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues)
+{
+ if (num_queues < 1)
+ return (EINVAL);
+
+ qmgr->num_queues = num_queues;
+ qmgr->qinfo = malloc(num_queues * sizeof(struct ixl_pf_qmgr_qinfo),
+ M_IXL, M_ZERO | M_WAITOK);
+ if (qmgr->qinfo == NULL)
+ return (ENOMEM);
+
+ return (0);
+}
+
+int
+ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
+{
+ int i;
+ int avail;
+ int block_start;
+ u16 alloc_size;
+
+ if (qtag == NULL || num < 1)
+ return (EINVAL);
+
+ /* We have to allocate in power-of-two chunks, so get next power of two */
+ alloc_size = (u16)next_power_of_two(num);
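+ /* e.g. a request for 5 queues allocates a block of 8 */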
+
+ /* Don't try if there aren't enough queues */
+ avail = ixl_pf_qmgr_get_num_free(qmgr);
+ if (avail < alloc_size)
+ return (ENOSPC);
+
+ block_start = ixl_pf_qmgr_find_free_contiguous_block(qmgr, alloc_size);
+ if (block_start < 0)
+ return (ENOSPC);
+
+ /* Mark queues as allocated */
+ for (i = block_start; i < block_start + alloc_size; i++)
+ qmgr->qinfo[i].allocated = true;
+
+ bzero(qtag, sizeof(*qtag));
+ qtag->qmgr = qmgr;
+ qtag->type = IXL_PF_QALLOC_CONTIGUOUS;
+ qtag->qidx[0] = block_start;
+ qtag->num_allocated = alloc_size;
+ qtag->num_active = num;
+
+ return (0);
+}
+
+/*
+ * NB: queue indices are u16 because that is the queue index width used in the Add VSI AQ command
+ */
+int
+ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag)
+{
+ int i;
+ int avail, count = 0;
+ u16 alloc_size;
+
+ if (qtag == NULL || num < 1 || num > 16)
+ return (EINVAL);
+
+ /* We have to allocate in power-of-two chunks, so get next power of two */
+ alloc_size = (u16)next_power_of_two(num);
+
+ avail = ixl_pf_qmgr_get_num_free(qmgr);
+ if (avail < alloc_size)
+ return (ENOSPC);
+
+ bzero(qtag, sizeof(*qtag));
+ qtag->qmgr = qmgr;
+ qtag->type = IXL_PF_QALLOC_SCATTERED;
+ qtag->num_active = num;
+ qtag->num_allocated = alloc_size;
+
+ for (i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated) {
+ qtag->qidx[count] = i;
+ count++;
+ qmgr->qinfo[i].allocated = true;
+ if (count == alloc_size)
+ return (0);
+ }
+ }
+
+ // Shouldn't get here
+ return (EDOOFUS);
+}
+
+int
+ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag)
+{
+ u16 i, qidx;
+
+ if (qtag == NULL)
+ return (EINVAL);
+
+ if (qtag->type == IXL_PF_QALLOC_SCATTERED) {
+ for (i = 0; i < qtag->num_allocated; i++) {
+ qidx = qtag->qidx[i];
+ bzero(&qmgr->qinfo[qidx], sizeof(qmgr->qinfo[qidx]));
+ }
+ } else {
+ u16 first_index = qtag->qidx[0];
+ for (i = first_index; i < first_index + qtag->num_allocated; i++)
+ bzero(&qmgr->qinfo[i], sizeof(qmgr->qinfo[i]));
+ }
+
+ qtag->qmgr = NULL;
+ return (0);
+}
+
+int
+ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr)
+{
+ return (qmgr->num_queues);
+}
+
+/*
+ * ERJ: This assumes the info array isn't longer than INT_MAX.
+ * This assumption might cause a y3k bug or something, I'm sure.
+ */
+int
+ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr)
+{
+ int count = 0;
+
+ for (int i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated)
+ count++;
+ }
+
+ return (count);
+}
+
+int
+ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start)
+{
+ int i;
+
+ if (start > qmgr->num_queues - 1)
+ return (-EINVAL);
+
+ for (i = start; i < qmgr->num_queues; i++) {
+ if (qmgr->qinfo[i].allocated)
+ continue;
+ else
+ return (i);
+ }
+
+ // No free queues
+ return (-ENOSPC);
+}
+
+void
+ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr)
+{
+ free(qmgr->qinfo, M_IXL);
+ qmgr->qinfo = NULL;
+}
+
+void
+ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_enabled = true;
+ else
+ qmgr->qinfo[pf_qidx].rx_enabled = true;
+}
+
+void
+ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_enabled = false;
+ else
+ qmgr->qinfo[pf_qidx].rx_enabled = false;
+}
+
+void
+ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ qmgr->qinfo[pf_qidx].tx_configured = true;
+ else
+ qmgr->qinfo[pf_qidx].rx_configured = true;
+}
+
+bool
+ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ return (qmgr->qinfo[pf_qidx].tx_enabled);
+ else
+ return (qmgr->qinfo[pf_qidx].rx_enabled);
+}
+
+bool
+ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx)
+{
+ MPASS(qtag != NULL);
+
+ struct ixl_pf_qmgr *qmgr = qtag->qmgr;
+ u16 pf_qidx = ixl_pf_qidx_from_vsi_qidx(qtag, vsi_qidx);
+ if (tx)
+ return (qmgr->qinfo[pf_qidx].tx_configured);
+ else
+ return (qmgr->qinfo[pf_qidx].rx_configured);
+}
+
+u16
+ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index)
+{
+ MPASS(index < qtag->num_allocated);
+
+ if (qtag->type == IXL_PF_QALLOC_CONTIGUOUS)
+ return qtag->qidx[0] + index;
+ else
+ return qtag->qidx[index];
+}
+
+/* Static Functions */
+
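+/*
+ * Linear scan for a run of 'num' consecutive unallocated queues; returns
+ * the index of the first queue in the run, or -1 if no such run exists.
+ */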
+static int
+ixl_pf_qmgr_find_free_contiguous_block(struct ixl_pf_qmgr *qmgr, int num)
+{
+ int i;
+ int count = 0;
+ bool block_started = false;
+ int possible_start;
+
+ for (i = 0; i < qmgr->num_queues; i++) {
+ if (!qmgr->qinfo[i].allocated) {
+ if (!block_started) {
+ block_started = true;
+ possible_start = i;
+ }
+ count++;
+ if (count == num)
+ return (possible_start);
+ } else { /* this queue is already allocated */
+ block_started = false;
+ count = 0;
+ }
+ }
+
+ /* Can't find a contiguous block of the requested size */
+ return (-1);
+}
+
diff --git a/sys/dev/ixl/ixl_pf_qmgr.h b/sys/dev/ixl/ixl_pf_qmgr.h
new file mode 100644
index 000000000000..d6ad431bd605
--- /dev/null
+++ b/sys/dev/ixl/ixl_pf_qmgr.h
@@ -0,0 +1,109 @@
+/******************************************************************************
+
+ Copyright (c) 2013-2015, Intel Corporation
+ All rights reserved.
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+
+ 2. Redistributions in binary form must reproduce the above copyright
+ notice, this list of conditions and the following disclaimer in the
+ documentation and/or other materials provided with the distribution.
+
+ 3. Neither the name of the Intel Corporation nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+******************************************************************************/
+/*$FreeBSD$*/
+
+
+#include "ixl_pf.h"
+
+#ifndef _IXL_PF_QMGR_H_
+#define _IXL_PF_QMGR_H_
+
+/*
+ * Primarily manages the queues that need to be allocated to VSIs.
+ *
+ * Cardinality: There should only be one of these in a PF.
+ * Lifetime: Created and initialized in attach(); destroyed in detach().
+ */
+
+#define IXL_MAX_SCATTERED_QUEUES 16
+#define IXL_MAX_CONTIGUOUS_QUEUES_XL710 64
+#define IXL_MAX_CONTIGUOUS_QUEUES_X722 128
+
+/* Structures */
+
+/* Manager */
+struct ixl_pf_qmgr_qinfo {
+ bool allocated;
+ bool tx_enabled;
+ bool rx_enabled;
+ bool tx_configured;
+ bool rx_configured;
+};
+
+struct ixl_pf_qmgr {
+ u16 num_queues;
+ struct ixl_pf_qmgr_qinfo *qinfo;
+};
+
+/* Tag */
+enum ixl_pf_qmgr_qalloc_type {
+ IXL_PF_QALLOC_CONTIGUOUS,
+ IXL_PF_QALLOC_SCATTERED
+};
+
+struct ixl_pf_qtag {
+ struct ixl_pf_qmgr *qmgr;
+ enum ixl_pf_qmgr_qalloc_type type;
+ u16 qidx[IXL_MAX_SCATTERED_QUEUES];
+ u16 num_allocated;
+ u16 num_active;
+};
+
+/* Public manager functions */
+int ixl_pf_qmgr_init(struct ixl_pf_qmgr *qmgr, u16 num_queues);
+void ixl_pf_qmgr_destroy(struct ixl_pf_qmgr *qmgr);
+
+int ixl_pf_qmgr_get_num_queues(struct ixl_pf_qmgr *qmgr);
+int ixl_pf_qmgr_get_first_free(struct ixl_pf_qmgr *qmgr, u16 start);
+int ixl_pf_qmgr_get_num_free(struct ixl_pf_qmgr *qmgr);
+
+/* Allocate queues for a VF VSI */
+int ixl_pf_qmgr_alloc_scattered(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
+/* Allocate queues for the LAN VSIs, or X722 VF VSIs */
+int ixl_pf_qmgr_alloc_contiguous(struct ixl_pf_qmgr *qmgr, u16 num, struct ixl_pf_qtag *qtag);
+/* Release a queue allocation */
+int ixl_pf_qmgr_release(struct ixl_pf_qmgr *qmgr, struct ixl_pf_qtag *qtag);
+
+/* Help manage queues used in VFs */
+/* Typically hardware refers to RX as 0 and TX as 1, so continue that convention here */
+void ixl_pf_qmgr_mark_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+void ixl_pf_qmgr_mark_queue_disabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+void ixl_pf_qmgr_mark_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+bool ixl_pf_qmgr_is_queue_enabled(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+bool ixl_pf_qmgr_is_queue_configured(struct ixl_pf_qtag *qtag, u16 vsi_qidx, bool tx);
+
+/* Public tag functions */
+u16 ixl_pf_qidx_from_vsi_qidx(struct ixl_pf_qtag *qtag, u16 index);
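+
+/*
+ * Usage sketch (names hypothetical; error handling elided):
+ *
+ * struct ixl_pf_qmgr qmgr;
+ * struct ixl_pf_qtag tag;
+ *
+ * ixl_pf_qmgr_init(&qmgr, num_hw_queues);
+ * ixl_pf_qmgr_alloc_contiguous(&qmgr, vsi_num_queues, &tag);
+ * ... map VSI queue i to PF queue ixl_pf_qidx_from_vsi_qidx(&tag, i) ...
+ * ixl_pf_qmgr_release(&qmgr, &tag);
+ * ixl_pf_qmgr_destroy(&qmgr);
+ */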
+
+#endif /* _IXL_PF_QMGR_H_ */
+
diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c
index d3aa7bf7b259..5cf54fa9410a 100644
--- a/sys/dev/ixl/ixl_txrx.c
+++ b/sys/dev/ixl/ixl_txrx.c
@@ -35,7 +35,7 @@
/*
** IXL driver TX/RX Routines:
** This was separated to allow usage by
-** both the BASE and the VF drivers.
+** both the PF and VF drivers.
*/
#ifndef IXL_STANDALONE_BUILD
@@ -58,15 +58,37 @@ static int ixl_tx_setup_offload(struct ixl_queue *,
struct mbuf *, u32 *, u32 *);
static bool ixl_tso_setup(struct ixl_queue *, struct mbuf *);
-static __inline void ixl_rx_discard(struct rx_ring *, int);
-static __inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
+static inline void ixl_rx_discard(struct rx_ring *, int);
+static inline void ixl_rx_input(struct rx_ring *, struct ifnet *,
struct mbuf *, u8);
+static inline bool ixl_tso_detect_sparse(struct mbuf *mp);
+static int ixl_tx_setup_offload(struct ixl_queue *que,
+ struct mbuf *mp, u32 *cmd, u32 *off);
+static inline u32 ixl_get_tx_head(struct ixl_queue *que);
+
#ifdef DEV_NETMAP
#include <dev/netmap/if_ixl_netmap.h>
#endif /* DEV_NETMAP */
/*
+ * Write the driver's default RSS hash key into @key, which must hold at least IXL_RSS_KEY_SIZE bytes.
+ */
+void
+ixl_get_default_rss_key(u32 *key)
+{
+ MPASS(key != NULL);
+
+ u32 rss_seed[IXL_RSS_KEY_SIZE_REG] = {0x41b01687,
+ 0x183cfd8c, 0xce880440, 0x580cbc3c,
+ 0x35897377, 0x328b25e1, 0x4fa98922,
+ 0xb7d90c14, 0xd5bad70d, 0xcd15a2c1,
+ 0x0, 0x0, 0x0};
+
+ bcopy(rss_seed, key, IXL_RSS_KEY_SIZE);
+}
+
+/*
** Multiqueue Transmit driver
*/
int
@@ -98,13 +120,6 @@ ixl_mq_start(struct ifnet *ifp, struct mbuf *m)
i = m->m_pkthdr.flowid % vsi->num_queues;
} else
i = curcpu % vsi->num_queues;
- /*
- ** This may not be perfect, but until something
- ** better comes along it will keep from scheduling
- ** on stalled queues.
- */
- if (((1 << i) & vsi->active_queues) == 0)
- i = ffsl(vsi->active_queues);
que = &vsi->queues[i];
txr = &que->txr;
@@ -239,7 +254,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
struct ixl_tx_buf *buf;
struct i40e_tx_desc *txd = NULL;
struct mbuf *m_head, *m;
- int i, j, error, nsegs, maxsegs;
+ int i, j, error, nsegs;
int first, last = 0;
u16 vtag = 0;
u32 cmd, off;
@@ -259,12 +274,10 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
buf = &txr->buffers[first];
map = buf->map;
tag = txr->tx_tag;
- maxsegs = IXL_MAX_TX_SEGS;
if (m_head->m_pkthdr.csum_flags & CSUM_TSO) {
/* Use larger mapping for TSO */
tag = txr->tso_tag;
- maxsegs = IXL_MAX_TSO_SEGS;
if (ixl_tso_detect_sparse(m_head)) {
m = m_defrag(m_head, M_NOWAIT);
if (m == NULL) {
@@ -299,19 +312,19 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp)
*m_headp, segs, &nsegs, BUS_DMA_NOWAIT);
if (error == ENOMEM) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
}
} else if (error == ENOMEM) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
return (error);
} else if (error != 0) {
- que->tx_dma_setup++;
+ que->tx_dmamap_failed++;
m_freem(*m_headp);
*m_headp = NULL;
return (error);
@@ -804,6 +817,7 @@ ixl_tso_setup(struct ixl_queue *que, struct mbuf *mp)
type = I40E_TX_DESC_DTYPE_CONTEXT;
cmd = I40E_TX_CTX_DESC_TSO;
+ /* ERJ: this must not be less than 64 */
mss = mp->m_pkthdr.tso_segsz;
type_cmd_tso_mss = ((u64)type << I40E_TXD_CTX_QW1_DTYPE_SHIFT) |
@@ -1374,7 +1388,7 @@ ixl_free_que_rx(struct ixl_queue *que)
return;
}
-static __inline void
+static inline void
ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
{
@@ -1405,7 +1419,7 @@ ixl_rx_input(struct rx_ring *rxr, struct ifnet *ifp, struct mbuf *m, u8 ptype)
}
-static __inline void
+static inline void
ixl_rx_discard(struct rx_ring *rxr, int i)
{
struct ixl_rx_buf *rbuf;
@@ -1532,7 +1546,7 @@ ixl_rxeof(struct ixl_queue *que, int count)
for (i = rxr->next_check; count != 0;) {
struct mbuf *sendmp, *mh, *mp;
- u32 rsc, status, error;
+ u32 status, error;
u16 hlen, plen, vtag;
u64 qword;
u8 ptype;
@@ -1565,7 +1579,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
count--;
sendmp = NULL;
nbuf = NULL;
- rsc = 0;
cur->wb.qword1.status_error_len = 0;
rbuf = &rxr->buffers[i];
mh = rbuf->m_head;
@@ -1673,10 +1686,6 @@ ixl_rxeof(struct ixl_queue *que, int count)
sendmp = mp;
sendmp->m_flags |= M_PKTHDR;
sendmp->m_pkthdr.len = mp->m_len;
- if (vtag) {
- sendmp->m_pkthdr.ether_vtag = vtag;
- sendmp->m_flags |= M_VLANTAG;
- }
}
/* Pass the head pointer on */
if (eop == 0) {
@@ -1695,6 +1704,11 @@ ixl_rxeof(struct ixl_queue *que, int count)
/* capture data for dynamic ITR adjustment */
rxr->packets++;
rxr->bytes += sendmp->m_pkthdr.len;
+ /* Set VLAN tag (field only valid in eop desc) */
+ if (vtag) {
+ sendmp->m_pkthdr.ether_vtag = vtag;
+ sendmp->m_flags |= M_VLANTAG;
+ }
if ((ifp->if_capenable & IFCAP_RXCSUM) != 0)
ixl_rx_checksum(sendmp, status, error, ptype);
#ifdef RSS
diff --git a/sys/dev/ixl/ixlv.h b/sys/dev/ixl/ixlv.h
index a131ca0a4b22..888b4e0a2bc7 100644
--- a/sys/dev/ixl/ixlv.h
+++ b/sys/dev/ixl/ixlv.h
@@ -44,7 +44,7 @@
#define IXLV_AQ_TIMEOUT (1 * hz)
#define IXLV_CALLOUT_TIMO (hz / 50) /* 20 msec */
-#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1)
+#define IXLV_FLAG_AQ_ENABLE_QUEUES (u32)(1 << 0)
#define IXLV_FLAG_AQ_DISABLE_QUEUES (u32)(1 << 1)
#define IXLV_FLAG_AQ_ADD_MAC_FILTER (u32)(1 << 2)
#define IXLV_FLAG_AQ_ADD_VLAN_FILTER (u32)(1 << 3)
@@ -55,6 +55,10 @@
#define IXLV_FLAG_AQ_HANDLE_RESET (u32)(1 << 8)
#define IXLV_FLAG_AQ_CONFIGURE_PROMISC (u32)(1 << 9)
#define IXLV_FLAG_AQ_GET_STATS (u32)(1 << 10)
+#define IXLV_FLAG_AQ_CONFIG_RSS_KEY (u32)(1 << 11)
+#define IXLV_FLAG_AQ_SET_RSS_HENA (u32)(1 << 12)
+#define IXLV_FLAG_AQ_GET_RSS_HENA_CAPS (u32)(1 << 13)
+#define IXLV_FLAG_AQ_CONFIG_RSS_LUT (u32)(1 << 14)
/* printf %b arg */
#define IXLV_FLAGS \
@@ -62,9 +66,17 @@
"\4ADD_VLAN_FILTER\5DEL_MAC_FILTER\6DEL_VLAN_FILTER" \
"\7CONFIGURE_QUEUES\10MAP_VECTORS\11HANDLE_RESET" \
"\12CONFIGURE_PROMISC\13GET_STATS"
-
-/* Hack for compatibility with 1.0.x linux pf driver */
-#define I40E_VIRTCHNL_OP_EVENT 17
+#define IXLV_PRINTF_VF_OFFLOAD_FLAGS \
+ "\20\1I40E_VIRTCHNL_VF_OFFLOAD_L2" \
+ "\2I40E_VIRTCHNL_VF_OFFLOAD_IWARP" \
+ "\3I40E_VIRTCHNL_VF_OFFLOAD_FCOE" \
+ "\4I40E_VIRTCHNL_VF_OFFLOAD_RSS_AQ" \
+ "\5I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG" \
+ "\6I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR" \
+ "\21I40E_VIRTCHNL_VF_OFFLOAD_VLAN" \
+ "\22I40E_VIRTCHNL_VF_OFFLOAD_RX_POLLING" \
+ "\23I40E_VIRTCHNL_VF_OFFLOAD_RSS_PCTYPE_V2" \
+ "\24I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF"
/* Driver state */
enum ixlv_state_t {
@@ -80,9 +92,11 @@ enum ixlv_state_t {
IXLV_INIT_MAPPING,
IXLV_INIT_ENABLE,
IXLV_INIT_COMPLETE,
- IXLV_RUNNING,
+ IXLV_RUNNING,
};
+/* Structs */
+
struct ixlv_mac_filter {
SLIST_ENTRY(ixlv_mac_filter) next;
u8 macaddr[ETHER_ADDR_LEN];
@@ -101,12 +115,13 @@ SLIST_HEAD(vlan_list, ixlv_vlan_filter);
struct ixlv_sc {
struct i40e_hw hw;
struct i40e_osdep osdep;
- struct device *dev;
+ device_t dev;
struct resource *pci_mem;
struct resource *msix_mem;
enum ixlv_state_t init_state;
+ int init_in_progress;
/*
* Interrupt resources
@@ -154,6 +169,10 @@ struct ixlv_sc {
struct ixl_vc_cmd del_vlan_cmd;
struct ixl_vc_cmd add_multi_cmd;
struct ixl_vc_cmd del_multi_cmd;
+ struct ixl_vc_cmd config_rss_key_cmd;
+ struct ixl_vc_cmd get_rss_hena_caps_cmd;
+ struct ixl_vc_cmd set_rss_hena_cmd;
+ struct ixl_vc_cmd config_rss_lut_cmd;
/* Virtual comm channel */
struct i40e_virtchnl_vf_resource *vf_res;
@@ -209,5 +228,9 @@ void ixlv_del_vlans(struct ixlv_sc *);
void ixlv_update_stats_counters(struct ixlv_sc *,
struct i40e_eth_stats *);
void ixlv_update_link_status(struct ixlv_sc *);
+void ixlv_get_default_rss_key(u32 *, bool);
+void ixlv_config_rss_key(struct ixlv_sc *);
+void ixlv_set_rss_hena(struct ixlv_sc *);
+void ixlv_config_rss_lut(struct ixlv_sc *);
#endif /* _IXLV_H_ */
diff --git a/sys/dev/ixl/ixlvc.c b/sys/dev/ixl/ixlvc.c
index 6a367eab3a6d..7b041c18a8d7 100644
--- a/sys/dev/ixl/ixlvc.c
+++ b/sys/dev/ixl/ixlvc.c
@@ -69,8 +69,10 @@ static int ixl_vc_validate_vf_msg(struct ixlv_sc *sc, u32 v_opcode,
valid_len = sizeof(struct i40e_virtchnl_version_info);
break;
case I40E_VIRTCHNL_OP_RESET_VF:
+ valid_len = 0;
+ break;
case I40E_VIRTCHNL_OP_GET_VF_RESOURCES:
- // TODO: valid length in api v1.0 is 0, v1.1 is 4
+ /* Valid length in api v1.0 is 0, v1.1 is 4 */
valid_len = 4;
break;
case I40E_VIRTCHNL_OP_CONFIG_TX_QUEUE:
@@ -218,7 +220,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
i40e_status err;
int retries = 0;
- event.buf_len = IXL_AQ_BUFSZ;
+ event.buf_len = IXL_AQ_BUF_SZ;
event.msg_buf = malloc(event.buf_len, M_DEVBUF, M_NOWAIT);
if (!event.msg_buf) {
err = ENOMEM;
@@ -230,7 +232,7 @@ ixlv_verify_api_ver(struct ixlv_sc *sc)
goto out_alloc;
/* Initial delay here is necessary */
- i40e_msec_delay(100);
+ i40e_msec_pause(100);
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK)
continue;
@@ -288,7 +290,7 @@ ixlv_send_vf_config_msg(struct ixlv_sc *sc)
u32 caps;
caps = I40E_VIRTCHNL_VF_OFFLOAD_L2 |
- I40E_VIRTCHNL_VF_OFFLOAD_RSS_REG |
+ I40E_VIRTCHNL_VF_OFFLOAD_RSS_PF |
I40E_VIRTCHNL_VF_OFFLOAD_VLAN;
if (sc->pf_version == I40E_VIRTCHNL_VERSION_MINOR_NO_VF_CAPS)
@@ -331,7 +333,7 @@ ixlv_get_vf_config(struct ixlv_sc *sc)
err = i40e_clean_arq_element(hw, &event, NULL);
if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
if (++retries <= IXLV_AQ_MAX_ERR)
- i40e_msec_delay(10);
+ i40e_msec_pause(10);
} else if ((enum i40e_virtchnl_ops)le32toh(event.desc.cookie_high) !=
I40E_VIRTCHNL_OP_GET_VF_RESOURCES) {
DDPRINTF(dev, "Received a response from PF,"
@@ -498,7 +500,7 @@ ixlv_map_queues(struct ixlv_sc *sc)
vm->vecmap[i].txq_map = (1 << que->me);
vm->vecmap[i].rxq_map = (1 << que->me);
vm->vecmap[i].rxitr_idx = 0;
- vm->vecmap[i].txitr_idx = 0;
+ vm->vecmap[i].txitr_idx = 1;
}
/* Misc vector last - this is only for AdminQ messages */
@@ -570,13 +572,6 @@ ixlv_add_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- // ERJ: Should this be taken out?
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_ADD_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_ADD_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@@ -640,13 +635,6 @@ ixlv_del_vlans(struct ixlv_sc *sc)
if (i == cnt)
break;
}
- // ERJ: Take this out?
- if (i == 0) { /* Should not happen... */
- device_printf(dev, "%s: i == 0?\n", __func__);
- ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_DEL_VLAN_FILTER,
- I40E_SUCCESS);
- return;
- }
ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_DEL_VLAN, (u8 *)v, len);
free(v, M_DEVBUF);
@@ -842,6 +830,97 @@ ixlv_update_stats_counters(struct ixlv_sc *sc, struct i40e_eth_stats *es)
vsi->eth_stats = *es;
}
+void
+ixlv_config_rss_key(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_key *rss_key_msg;
+ int msg_len, key_length;
+ u8 rss_seed[IXL_RSS_KEY_SIZE];
+
+#ifdef RSS
+ /* Fetch the configured RSS key */
+ rss_getkey((uint8_t *) &rss_seed);
+#else
+ ixl_get_default_rss_key((u32 *)rss_seed);
+#endif
+
+ /* Send the fetched key */
+ key_length = IXL_RSS_KEY_SIZE;
+ msg_len = sizeof(struct i40e_virtchnl_rss_key) + (sizeof(u8) * key_length) - 1;
+ rss_key_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (rss_key_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS key msg.\n");
+ return;
+ }
+
+ rss_key_msg->vsi_id = sc->vsi_res->vsi_id;
+ rss_key_msg->key_len = key_length;
+ bcopy(rss_seed, &rss_key_msg->key[0], key_length);
+
+ DDPRINTF(sc->dev, "config_rss: vsi_id %d, key_len %d",
+ rss_key_msg->vsi_id, rss_key_msg->key_len);
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_KEY,
+ (u8 *)rss_key_msg, msg_len);
+
+ free(rss_key_msg, M_DEVBUF);
+}
+
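
ixlv_config_rss_key() uses the classic variable-length message idiom: the
virtchnl struct ends in a one-element array, so the allocation size is
sizeof(struct) plus the payload length, minus the one element already counted.
A self-contained model of that arithmetic (field names mirror the driver's
i40e_virtchnl_rss_key; the field types and the 40-byte key length are
illustrative assumptions):

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	struct rss_key_msg {			/* models i40e_virtchnl_rss_key */
		unsigned short	vsi_id;
		unsigned short	key_len;
		unsigned char	key[1];		/* really key_len bytes */
	};

	int
	main(void)
	{
		unsigned char seed[40];		/* key length is illustrative */
		size_t msg_len = sizeof(struct rss_key_msg) + sizeof(seed) - 1;
		struct rss_key_msg *msg = calloc(1, msg_len);

		if (msg == NULL)
			return (1);
		memset(seed, 0xab, sizeof(seed));
		msg->vsi_id = 3;
		msg->key_len = sizeof(seed);
		memcpy(msg->key, seed, sizeof(seed));
		printf("msg_len %zu, key_len %u\n", msg_len,
		    (unsigned)msg->key_len);
		free(msg);
		return (0);
	}
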
+void
+ixlv_set_rss_hena(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_hena hena;
+
+ hena.hena = IXL_DEFAULT_RSS_HENA;
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_SET_RSS_HENA,
+ (u8 *)&hena, sizeof(hena));
+}
+
+void
+ixlv_config_rss_lut(struct ixlv_sc *sc)
+{
+ struct i40e_virtchnl_rss_lut *rss_lut_msg;
+ int msg_len;
+ u16 lut_length;
+ u32 lut;
+ int i, que_id;
+
+ lut_length = IXL_RSS_VSI_LUT_SIZE;
+ msg_len = sizeof(struct i40e_virtchnl_rss_lut) + (lut_length * sizeof(u8)) - 1;
+ rss_lut_msg = malloc(msg_len, M_DEVBUF, M_NOWAIT | M_ZERO);
+ if (rss_lut_msg == NULL) {
+ device_printf(sc->dev, "Unable to allocate msg memory for RSS lut msg.\n");
+ return;
+ }
+
+ rss_lut_msg->vsi_id = sc->vsi_res->vsi_id;
+ /* Each LUT entry is a max of 1 byte, so this is easy */
+ rss_lut_msg->lut_entries = lut_length;
+
+	/* Populate the LUT with queue indices in round-robin fashion */
+ for (i = 0; i < lut_length; i++) {
+#ifdef RSS
+ /*
+ * Fetch the RSS bucket id for the given indirection entry.
+ * Cap it at the number of configured buckets (which is
+ * num_queues.)
+ */
+ que_id = rss_get_indirection_to_bucket(i);
+ que_id = que_id % sc->vsi.num_queues;
+#else
+ que_id = i % sc->vsi.num_queues;
+#endif
+ lut = que_id & IXL_RSS_VSI_LUT_ENTRY_MASK;
+ rss_lut_msg->lut[i] = lut;
+ }
+
+ ixlv_send_pf_msg(sc, I40E_VIRTCHNL_OP_CONFIG_RSS_LUT,
+ (u8 *)rss_lut_msg, msg_len);
+
+ free(rss_lut_msg, M_DEVBUF);
+}
+
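
The LUT fill above is a plain round-robin spread of queue indices across the
table, so each hash bucket lands on one of the configured queues. The same
loop in a runnable form (table size and queue count are illustrative; the
driver additionally masks each entry with IXL_RSS_VSI_LUT_ENTRY_MASK):

	#include <stdio.h>

	int
	main(void)
	{
		unsigned char lut[64];	/* real size is IXL_RSS_VSI_LUT_SIZE */
		int num_queues = 4;	/* queues actually configured */
		int i;

		for (i = 0; i < (int)sizeof(lut); i++)
			lut[i] = i % num_queues;
		for (i = 0; i < (int)sizeof(lut); i++)
			printf("%d%c", lut[i], (i % 16 == 15) ? '\n' : ' ');
		return (0);
	}
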
/*
** ixlv_vc_completion
**
@@ -940,7 +1019,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixlv_enable_intr(vsi);
/* And inform the stack we're ready */
vsi->ifp->if_drv_flags |= IFF_DRV_RUNNING;
- vsi->ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
+	/* TODO: Clear a state flag so we know we're ready to run init again */
}
break;
case I40E_VIRTCHNL_OP_DISABLE_QUEUES:
@@ -950,7 +1029,7 @@ ixlv_vc_completion(struct ixlv_sc *sc,
/* Turn off all interrupts */
ixlv_disable_intr(vsi);
/* Tell the stack that the interface is no longer active */
- vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
+ vsi->ifp->if_drv_flags &= ~(IFF_DRV_RUNNING);
}
break;
case I40E_VIRTCHNL_OP_CONFIG_VSI_QUEUES:
@@ -961,6 +1040,18 @@ ixlv_vc_completion(struct ixlv_sc *sc,
ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_MAP_VECTORS,
v_retval);
break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_KEY:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_KEY,
+ v_retval);
+ break;
+ case I40E_VIRTCHNL_OP_SET_RSS_HENA:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_SET_RSS_HENA,
+ v_retval);
+ break;
+ case I40E_VIRTCHNL_OP_CONFIG_RSS_LUT:
+ ixl_vc_process_resp(&sc->vc_mgr, IXLV_FLAG_AQ_CONFIG_RSS_LUT,
+ v_retval);
+ break;
default:
#ifdef IXL_DEBUG
device_printf(dev,
@@ -1008,6 +1099,18 @@ ixl_vc_send_cmd(struct ixlv_sc *sc, uint32_t request)
case IXLV_FLAG_AQ_ENABLE_QUEUES:
ixlv_enable_queues(sc);
break;
+
+ case IXLV_FLAG_AQ_CONFIG_RSS_KEY:
+ ixlv_config_rss_key(sc);
+ break;
+
+ case IXLV_FLAG_AQ_SET_RSS_HENA:
+ ixlv_set_rss_hena(sc);
+ break;
+
+ case IXLV_FLAG_AQ_CONFIG_RSS_LUT:
+ ixlv_config_rss_lut(sc);
+ break;
}
}
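
Each new IXLV_FLAG_AQ_* case follows the same asynchronous handshake:
ixl_vc_send_cmd() fires the request, and the matching opcode case added to
ixlv_vc_completion() later reports the result through ixl_vc_process_resp().
A toy model of that pending-flag lifecycle (the flag values and the manager
itself are stand-ins, not the driver's actual ixl_vc_* structures):

	#include <stdio.h>

	static unsigned pending;

	static void
	send_cmd(unsigned flag)
	{
		pending |= flag;
		printf("sent 0x%x\n", flag);
	}

	static void
	process_resp(unsigned flag, int retval)
	{
		pending &= ~flag;
		printf("done 0x%x retval %d, pending now 0x%x\n",
		    flag, retval, pending);
	}

	int
	main(void)
	{
		send_cmd(0x1);		/* e.g. CONFIG_RSS_KEY */
		send_cmd(0x2);		/* e.g. SET_RSS_HENA */
		process_resp(0x1, 0);
		process_resp(0x2, 0);
		return (0);
	}
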
diff --git a/sys/dev/kbd/kbd.c b/sys/dev/kbd/kbd.c
index 6a386088ff1b..ff49ee61b2c8 100644
--- a/sys/dev/kbd/kbd.c
+++ b/sys/dev/kbd/kbd.c
@@ -284,8 +284,8 @@ kbd_unregister(keyboard_t *kbd)
}
/* find a function table by the driver name */
-keyboard_switch_t
-*kbd_get_switch(char *driver)
+keyboard_switch_t *
+kbd_get_switch(char *driver)
{
const keyboard_driver_t **list;
const keyboard_driver_t *p;
@@ -419,8 +419,8 @@ kbd_change_callback(keyboard_t *kbd, void *id, kbd_callback_func_t *func,
}
/* get a keyboard structure */
-keyboard_t
-*kbd_get_keyboard(int index)
+keyboard_t *
+kbd_get_keyboard(int index)
{
if ((index < 0) || (index >= keyboards))
return (NULL);
@@ -1118,8 +1118,8 @@ fkey_change_ok(fkeytab_t *oldkey, fkeyarg_t *newkey, struct thread *td)
#endif
/* get a pointer to the string associated with the given function key */
-u_char
-*genkbd_get_fkeystr(keyboard_t *kbd, int fkey, size_t *len)
+u_char *
+genkbd_get_fkeystr(keyboard_t *kbd, int fkey, size_t *len)
{
if (kbd == NULL)
return (NULL);
@@ -1131,8 +1131,8 @@ u_char
}
/* diagnostic dump */
-static char
-*get_kbd_type_name(int type)
+static char *
+get_kbd_type_name(int type)
{
static struct {
int type;
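
The kbd.c hunks above are pure style fixes: the pointer qualifier moves up so
the return type reads as one unit and the function name starts its own line,
per the style(9) convention of putting the function type on a line by itself.
The target form, reduced to a compilable fragment (the name here is a
hypothetical example, not from kbd.c):

	char *
	get_name_example(int type)
	{
		(void)type;
		return (NULL);
	}
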
diff --git a/sys/dev/mcd/mcd.c b/sys/dev/mcd/mcd.c
deleted file mode 100644
index dacbb3d3e2f0..000000000000
--- a/sys/dev/mcd/mcd.c
+++ /dev/null
@@ -1,1652 +0,0 @@
-/*-
- * Copyright 1993 by Holger Veit (data part)
- * Copyright 1993 by Brian Moore (audio part)
- * Changes Copyright 1993 by Gary Clark II
- * Changes Copyright (C) 1994-1995 by Andrey A. Chernov, Moscow, Russia
- *
- * Rewrote probe routine to work on newer Mitsumi drives.
- * Additional changes (C) 1994 by Jordan K. Hubbard
- *
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This software was developed by Holger Veit and Brian Moore
- * for use with "386BSD" and similar operating systems.
- * "Similar operating systems" includes mainly non-profit oriented
- * systems for research and education, including but not restricted to
- * "NetBSD", "FreeBSD", "Mach" (by CMU).
- * 4. Neither the name of the developer(s) nor the name "386BSD"
- * may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
- * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-static const char __used COPYRIGHT[] = "mcd-driver (C)1993 by H.Veit & B.Moore";
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/conf.h>
-#include <sys/fcntl.h>
-#include <sys/bio.h>
-#include <sys/cdio.h>
-#include <sys/disk.h>
-#include <sys/bus.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/rman.h>
-
-#include <isa/isavar.h>
-
-#include <dev/mcd/mcdreg.h>
-#include <dev/mcd/mcdvar.h>
-
-#define MCD_TRACE(format, args...) \
-{ \
- if (sc->debug) { \
- device_printf(sc->dev, "status=0x%02x: ", \
- sc->data.status); \
- printf(format, ## args); \
- } \
-}
-
-#define RAW_PART 2
-
-/* flags */
-#define MCDVALID 0x0001 /* parameters loaded */
-#define MCDINIT 0x0002 /* device is init'd */
-#define MCDNEWMODEL 0x0004 /* device is new model */
-#define MCDLABEL 0x0008 /* label is read */
-#define MCDPROBING 0x0010 /* probing */
-#define MCDREADRAW 0x0020 /* read raw mode (2352 bytes) */
-#define MCDVOLINFO 0x0040 /* already read volinfo */
-#define MCDTOC 0x0080 /* already read toc */
-#define MCDMBXBSY 0x0100 /* local mbx is busy */
-
-/* status */
-#define MCDAUDIOBSY MCD_ST_AUDIOBSY /* playing audio */
-#define MCDDSKCHNG MCD_ST_DSKCHNG /* sensed change of disk */
-#define MCDDSKIN MCD_ST_DSKIN /* sensed disk in drive */
-#define MCDDOOROPEN MCD_ST_DOOROPEN /* sensed door open */
-
-/* These are apparently the different states a mitsumi can get up to */
-#define MCDCDABSENT 0x0030
-#define MCDCDPRESENT 0x0020
-#define MCDSCLOSED 0x0080
-#define MCDSOPEN 0x00a0
-
-#define MCD_MD_UNKNOWN (-1)
-
-#define MCD_TYPE_UNKNOWN 0
-#define MCD_TYPE_LU002S 1
-#define MCD_TYPE_LU005S 2
-#define MCD_TYPE_LU006S 3
-#define MCD_TYPE_FX001 4
-#define MCD_TYPE_FX001D 5
-
-/* reader state machine */
-#define MCD_S_BEGIN 0
-#define MCD_S_BEGIN1 1
-#define MCD_S_WAITSTAT 2
-#define MCD_S_WAITMODE 3
-#define MCD_S_WAITREAD 4
-
-/* prototypes */
-static void mcd_start(struct mcd_softc *);
-#ifdef NOTYET
-static void mcd_configure(struct mcd_softc *sc);
-#endif
-static int mcd_get(struct mcd_softc *, char *buf, int nmax);
-static int mcd_setflags(struct mcd_softc *);
-static int mcd_getstat(struct mcd_softc *,int sflg);
-static int mcd_send(struct mcd_softc *, int cmd,int nretrys);
-static void hsg2msf(int hsg, bcd_t *msf);
-static int msf2hsg(bcd_t *msf, int relative);
-static int mcd_volinfo(struct mcd_softc *);
-static int mcd_waitrdy(struct mcd_softc *,int dly);
-static void mcd_timeout(void *arg);
-static void mcd_doread(struct mcd_softc *, int state, struct mcd_mbx *mbxin);
-static void mcd_soft_reset(struct mcd_softc *);
-static int mcd_hard_reset(struct mcd_softc *);
-static int mcd_setmode(struct mcd_softc *, int mode);
-static int mcd_getqchan(struct mcd_softc *, struct mcd_qchninfo *q);
-static int mcd_subchan(struct mcd_softc *, struct ioc_read_subchannel *sc,
- int nocopyout);
-static int mcd_toc_header(struct mcd_softc *, struct ioc_toc_header *th);
-static int mcd_read_toc(struct mcd_softc *);
-static int mcd_toc_entrys(struct mcd_softc *, struct ioc_read_toc_entry *te);
-#if 0
-static int mcd_toc_entry(struct mcd_softc *, struct ioc_read_toc_single_entry *te);
-#endif
-static int mcd_stop(struct mcd_softc *);
-static int mcd_eject(struct mcd_softc *);
-static int mcd_inject(struct mcd_softc *);
-static int mcd_playtracks(struct mcd_softc *, struct ioc_play_track *pt);
-static int mcd_play(struct mcd_softc *, struct mcd_read2 *pb);
-static int mcd_playmsf(struct mcd_softc *, struct ioc_play_msf *pt);
-static int mcd_playblocks(struct mcd_softc *, struct ioc_play_blocks *);
-static int mcd_pause(struct mcd_softc *);
-static int mcd_resume(struct mcd_softc *);
-static int mcd_lock_door(struct mcd_softc *, int lock);
-static int mcd_close_tray(struct mcd_softc *);
-static int mcd_size(struct cdev *dev);
-
-static d_open_t mcdopen;
-static d_close_t mcdclose;
-static d_ioctl_t mcdioctl;
-static d_strategy_t mcdstrategy;
-
-static struct cdevsw mcd_cdevsw = {
- .d_version = D_VERSION,
- .d_open = mcdopen,
- .d_close = mcdclose,
- .d_read = physread,
- .d_ioctl = mcdioctl,
- .d_strategy = mcdstrategy,
- .d_name = "mcd",
- .d_flags = D_DISK,
-};
-
-#define MCD_RETRYS 5
-#define MCD_RDRETRYS 8
-
-#define CLOSE_TRAY_SECS 8
-#define DISK_SENSE_SECS 3
-#define WAIT_FRAC 4
-
-/* several delays */
-#define RDELAY_WAITSTAT 300
-#define RDELAY_WAITMODE 300
-#define RDELAY_WAITREAD 800
-
-#define MIN_DELAY 15
-#define DELAY_GETREPLY 5000000
-
-int
-mcd_attach(struct mcd_softc *sc)
-{
- int unit;
-
- unit = device_get_unit(sc->dev);
-
- MCD_LOCK(sc);
- sc->data.flags |= MCDINIT;
- mcd_soft_reset(sc);
- bioq_init(&sc->data.head);
-
-#ifdef NOTYET
- /* wire controller for interrupts and dma */
- mcd_configure(sc);
-#endif
- MCD_UNLOCK(sc);
- /* name filled in probe */
- sc->mcd_dev_t = make_dev(&mcd_cdevsw, 8 * unit,
- UID_ROOT, GID_OPERATOR, 0640, "mcd%d", unit);
-
- sc->mcd_dev_t->si_drv1 = (void *)sc;
- callout_init_mtx(&sc->timer, &sc->mtx, 0);
-
- return (0);
-}
-
-static int
-mcdopen(struct cdev *dev, int flags, int fmt, struct thread *td)
-{
- struct mcd_softc *sc;
- int r,retry;
-
- sc = (struct mcd_softc *)dev->si_drv1;
-
- /* invalidated in the meantime? mark all open part's invalid */
- MCD_LOCK(sc);
- if (!(sc->data.flags & MCDVALID) && sc->data.openflags) {
- MCD_UNLOCK(sc);
- return (ENXIO);
- }
-
- if (mcd_getstat(sc, 1) == -1) {
- MCD_UNLOCK(sc);
- return (EIO);
- }
-
- if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN))
- || !(sc->data.status & MCDDSKIN))
- for (retry = 0; retry < DISK_SENSE_SECS * WAIT_FRAC; retry++) {
- (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH,
- "mcdsn1", hz/WAIT_FRAC);
- if ((r = mcd_getstat(sc, 1)) == -1) {
- MCD_UNLOCK(sc);
- return (EIO);
- }
- if (r != -2)
- break;
- }
-
- if (sc->data.status & MCDDOOROPEN) {
- MCD_UNLOCK(sc);
- device_printf(sc->dev, "door is open\n");
- return (ENXIO);
- }
- if (!(sc->data.status & MCDDSKIN)) {
- MCD_UNLOCK(sc);
- device_printf(sc->dev, "no CD inside\n");
- return (ENXIO);
- }
- if (sc->data.status & MCDDSKCHNG) {
- MCD_UNLOCK(sc);
- device_printf(sc->dev, "CD not sensed\n");
- return (ENXIO);
- }
-
- if (mcd_size(dev) < 0) {
- MCD_UNLOCK(sc);
- device_printf(sc->dev, "failed to get disk size\n");
- return (ENXIO);
- }
-
- sc->data.openflags = 1;
- sc->data.partflags |= MCDREADRAW;
- sc->data.flags |= MCDVALID;
-
- (void) mcd_lock_door(sc, MCD_LK_LOCK);
- if (!(sc->data.flags & MCDVALID)) {
- MCD_UNLOCK(sc);
- return (ENXIO);
- }
-
- r = mcd_read_toc(sc);
- MCD_UNLOCK(sc);
- return (r);
-}
-
-static int
-mcdclose(struct cdev *dev, int flags, int fmt, struct thread *td)
-{
- struct mcd_softc *sc;
-
- sc = (struct mcd_softc *)dev->si_drv1;
-
- MCD_LOCK(sc);
- KASSERT(sc->data.openflags, ("device not open"));
-
- (void) mcd_lock_door(sc, MCD_LK_UNLOCK);
- sc->data.openflags = 0;
- sc->data.partflags &= ~MCDREADRAW;
- MCD_UNLOCK(sc);
-
- return (0);
-}
-
-static void
-mcdstrategy(struct bio *bp)
-{
- struct mcd_softc *sc;
-
- sc = (struct mcd_softc *)bp->bio_dev->si_drv1;
-
- /* if device invalidated (e.g. media change, door open), error */
- MCD_LOCK(sc);
- if (!(sc->data.flags & MCDVALID)) {
- device_printf(sc->dev, "media changed\n");
- bp->bio_error = EIO;
- goto bad;
- }
-
- /* read only */
- if (!(bp->bio_cmd == BIO_READ)) {
- bp->bio_error = EROFS;
- goto bad;
- }
-
- /* no data to read */
- if (bp->bio_bcount == 0)
- goto done;
-
- if (!(sc->data.flags & MCDTOC)) {
- bp->bio_error = EIO;
- goto bad;
- }
-
- bp->bio_resid = 0;
-
- /* queue it */
- bioq_disksort(&sc->data.head, bp);
-
- /* now check whether we can perform processing */
- mcd_start(sc);
- MCD_UNLOCK(sc);
- return;
-
-bad:
- bp->bio_flags |= BIO_ERROR;
-done:
- MCD_UNLOCK(sc);
- bp->bio_resid = bp->bio_bcount;
- biodone(bp);
- return;
-}
-
-static void
-mcd_start(struct mcd_softc *sc)
-{
- struct bio *bp;
-
- MCD_ASSERT_LOCKED(sc);
- if (sc->data.flags & MCDMBXBSY) {
- return;
- }
-
- bp = bioq_takefirst(&sc->data.head);
- if (bp != 0) {
- /* block found to process, dequeue */
- /*MCD_TRACE("mcd_start: found block bp=0x%x\n",bp,0,0,0);*/
- sc->data.flags |= MCDMBXBSY;
- } else {
- /* nothing to do */
- return;
- }
-
- sc->data.mbx.retry = MCD_RETRYS;
- sc->data.mbx.bp = bp;
-
- mcd_doread(sc, MCD_S_BEGIN,&(sc->data.mbx));
- return;
-}
-
-static int
-mcdioctl(struct cdev *dev, u_long cmd, caddr_t addr, int flags, struct thread *td)
-{
- struct mcd_softc *sc;
- int retry,r;
-
- sc = (struct mcd_softc *)dev->si_drv1;
-
- MCD_LOCK(sc);
- if (mcd_getstat(sc, 1) == -1) { /* detect disk change too */
- MCD_UNLOCK(sc);
- return (EIO);
- }
-MCD_TRACE("ioctl called 0x%lx\n", cmd);
-
- switch (cmd) {
- case CDIOCSETPATCH:
- case CDIOCGETVOL:
- case CDIOCSETVOL:
- case CDIOCSETMONO:
- case CDIOCSETSTERIO:
- case CDIOCSETMUTE:
- case CDIOCSETLEFT:
- case CDIOCSETRIGHT:
- MCD_UNLOCK(sc);
- return (EINVAL);
- case CDIOCEJECT:
- r = mcd_eject(sc);
- MCD_UNLOCK(sc);
- return (r);
- case CDIOCSETDEBUG:
- sc->data.debug = 1;
- MCD_UNLOCK(sc);
- return (0);
- case CDIOCCLRDEBUG:
- sc->data.debug = 0;
- MCD_UNLOCK(sc);
- return (0);
- case CDIOCRESET:
- r = mcd_hard_reset(sc);
- MCD_UNLOCK(sc);
- return (r);
- case CDIOCALLOW:
- r = mcd_lock_door(sc, MCD_LK_UNLOCK);
- MCD_UNLOCK(sc);
- return (r);
- case CDIOCPREVENT:
- r = mcd_lock_door(sc, MCD_LK_LOCK);
- MCD_UNLOCK(sc);
- return (r);
- case CDIOCCLOSE:
- r = mcd_inject(sc);
- MCD_UNLOCK(sc);
- return (r);
- }
-
- if (!(sc->data.flags & MCDVALID)) {
- if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN))
- || !(sc->data.status & MCDDSKIN))
- for (retry = 0; retry < DISK_SENSE_SECS * WAIT_FRAC; retry++) {
- (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH,
- "mcdsn2", hz/WAIT_FRAC);
- if ((r = mcd_getstat(sc, 1)) == -1) {
- MCD_UNLOCK(sc);
- return (EIO);
- }
- if (r != -2)
- break;
- }
- if ( (sc->data.status & (MCDDOOROPEN|MCDDSKCHNG))
- || !(sc->data.status & MCDDSKIN)
- || mcd_size(dev) < 0
- ) {
- MCD_UNLOCK(sc);
- return (ENXIO);
- }
- sc->data.flags |= MCDVALID;
- sc->data.partflags |= MCDREADRAW;
- (void) mcd_lock_door(sc, MCD_LK_LOCK);
- if (!(sc->data.flags & MCDVALID)) {
- MCD_UNLOCK(sc);
- return (ENXIO);
- }
- }
-
- switch (cmd) {
- case DIOCGMEDIASIZE:
- *(off_t *)addr = (off_t)sc->data.disksize * sc->data.blksize;
- r = 0;
- break;
- case DIOCGSECTORSIZE:
- *(u_int *)addr = sc->data.blksize;
- r = 0;
- break;
- case CDIOCPLAYTRACKS:
- r = mcd_playtracks(sc, (struct ioc_play_track *) addr);
- break;
- case CDIOCPLAYBLOCKS:
- r = mcd_playblocks(sc, (struct ioc_play_blocks *) addr);
- break;
- case CDIOCPLAYMSF:
- r = mcd_playmsf(sc, (struct ioc_play_msf *) addr);
- break;
- case CDIOCREADSUBCHANNEL_SYSSPACE:
- return mcd_subchan(sc, (struct ioc_read_subchannel *) addr, 1);
- case CDIOCREADSUBCHANNEL:
- return mcd_subchan(sc, (struct ioc_read_subchannel *) addr, 0);
- case CDIOREADTOCHEADER:
- r = mcd_toc_header(sc, (struct ioc_toc_header *) addr);
- break;
- case CDIOREADTOCENTRYS:
- return mcd_toc_entrys(sc, (struct ioc_read_toc_entry *) addr);
- case CDIOCRESUME:
- r = mcd_resume(sc);
- break;
- case CDIOCPAUSE:
- r = mcd_pause(sc);
- break;
- case CDIOCSTART:
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- r = EIO;
- else
- r = 0;
- break;
- case CDIOCSTOP:
- r = mcd_stop(sc);
- break;
- default:
- r = ENOTTY;
- }
- MCD_UNLOCK(sc);
- return (r);
-}
-
-static int
-mcd_size(struct cdev *dev)
-{
- struct mcd_softc *sc;
- int size;
-
- sc = (struct mcd_softc *)dev->si_drv1;
-
- if (mcd_volinfo(sc) == 0) {
- sc->data.blksize = MCDBLK;
- size = msf2hsg(sc->data.volinfo.vol_msf, 0);
- sc->data.disksize = size * (MCDBLK/DEV_BSIZE);
- return (0);
- }
- return (-1);
-}
-
-/***************************************************************
- * lower level of driver starts here
- **************************************************************/
-
-#ifdef NOTDEF
-static char
-irqs[] = {
- 0x00,0x00,0x10,0x20,0x00,0x30,0x00,0x00,
- 0x00,0x10,0x40,0x50,0x00,0x00,0x00,0x00
-};
-
-static char
-drqs[] = {
- 0x00,0x01,0x00,0x03,0x00,0x05,0x06,0x07,
-};
-#endif
-
-#ifdef NOT_YET
-static void
-mcd_configure(struct mcd_softc *sc)
-{
- MCD_WRITE(sc, MCD_REG_CONFIG, sc->data.config);
-}
-#endif
-
-/* Wait for non-busy - return 0 on timeout */
-static int
-twiddle_thumbs(struct mcd_softc *sc, int count, char *whine)
-{
- int i;
-
- for (i = 0; i < count; i++) {
- if (!(MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL))
- return (1);
- }
- if (bootverbose)
- device_printf(sc->dev, "timeout %s\n", whine);
- return (0);
-}
-
-/* check to see if a Mitsumi CD-ROM is attached to the ISA bus */
-
-int
-mcd_probe(struct mcd_softc *sc)
-{
- int i, j;
- unsigned char stbytes[3];
-
- sc->data.flags = MCDPROBING;
-
-#ifdef NOTDEF
- /* get irq/drq configuration word */
- sc->data.config = irqs[dev->id_irq]; /* | drqs[dev->id_drq];*/
-#else
- sc->data.config = 0;
-#endif
-
- /* send a reset */
- MCD_WRITE(sc, MCD_FLAGS, M_RESET);
-
- /*
- * delay awhile by getting any pending garbage (old data) and
- * throwing it away.
- */
- for (i = 1000000; i != 0; i--)
- (void)MCD_READ(sc, MCD_FLAGS);
-
- /* Get status */
- MCD_WRITE(sc, MCD_DATA, MCD_CMDGETSTAT);
- if (!twiddle_thumbs(sc, 1000000, "getting status"))
- return (ENXIO); /* Timeout */
- /* Get version information */
- MCD_WRITE(sc, MCD_DATA, MCD_CMDCONTINFO);
- for (j = 0; j < 3; j++) {
- if (!twiddle_thumbs(sc, 3000, "getting version info"))
- return (ENXIO);
- stbytes[j] = (MCD_READ(sc, MCD_DATA) & 0xFF);
- }
- if (stbytes[1] == stbytes[2])
- return (ENXIO);
- if (stbytes[2] >= 4 || stbytes[1] != 'M') {
- MCD_WRITE(sc, MCD_CTRL, M_PICKLE);
- sc->data.flags |= MCDNEWMODEL;
- }
- sc->data.read_command = MCD_CMDSINGLESPEEDREAD;
- switch (stbytes[1]) {
- case 'M':
- if (stbytes[2] <= 2) {
- sc->data.type = MCD_TYPE_LU002S;
- sc->data.name = "Mitsumi LU002S";
- } else if (stbytes[2] <= 5) {
- sc->data.type = MCD_TYPE_LU005S;
- sc->data.name = "Mitsumi LU005S";
- } else {
- sc->data.type = MCD_TYPE_LU006S;
- sc->data.name = "Mitsumi LU006S";
- }
- break;
- case 'F':
- sc->data.type = MCD_TYPE_FX001;
- sc->data.name = "Mitsumi FX001";
- break;
- case 'D':
- sc->data.type = MCD_TYPE_FX001D;
- sc->data.name = "Mitsumi FX001D";
- sc->data.read_command = MCD_CMDDOUBLESPEEDREAD;
- break;
- default:
- sc->data.type = MCD_TYPE_UNKNOWN;
- sc->data.name = "Mitsumi ???";
- break;
- }
-
- if (bootverbose)
- device_printf(sc->dev, "type %s, version info: %c %x\n",
- sc->data.name, stbytes[1], stbytes[2]);
-
- return (0);
-}
-
-
-static int
-mcd_waitrdy(struct mcd_softc *sc, int dly)
-{
- int i;
-
- /* wait until flag port senses status ready */
- for (i=0; i<dly; i+=MIN_DELAY) {
- if (!(MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL))
- return (0);
- DELAY(MIN_DELAY);
- }
- return (-1);
-}
-
-static int
-mcd_getreply(struct mcd_softc *sc, int dly)
-{
-
- /* wait data to become ready */
- if (mcd_waitrdy(sc, dly)<0) {
- device_printf(sc->dev, "timeout getreply\n");
- return (-1);
- }
-
- /* get the data */
- return (MCD_READ(sc, MCD_REG_STATUS) & 0xFF);
-}
-
-static int
-mcd_getstat(struct mcd_softc *sc, int sflg)
-{
- int i;
-
- /* get the status */
- if (sflg)
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDGETSTAT);
- i = mcd_getreply(sc, DELAY_GETREPLY);
- if (i<0 || (i & MCD_ST_CMDCHECK)) {
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- return (-1);
- }
-
- sc->data.status = i;
-
- if (mcd_setflags(sc) < 0)
- return (-2);
- return (sc->data.status);
-}
-
-static int
-mcd_setflags(struct mcd_softc *sc)
-{
-
- /* check flags */
- if ( (sc->data.status & (MCDDSKCHNG|MCDDOOROPEN))
- || !(sc->data.status & MCDDSKIN)) {
- MCD_TRACE("setflags: sensed DSKCHNG or DOOROPEN or !DSKIN\n");
- mcd_soft_reset(sc);
- return (-1);
- }
-
- if (sc->data.status & MCDAUDIOBSY)
- sc->data.audio_status = CD_AS_PLAY_IN_PROGRESS;
- else if (sc->data.audio_status == CD_AS_PLAY_IN_PROGRESS)
- sc->data.audio_status = CD_AS_PLAY_COMPLETED;
- return (0);
-}
-
-static int
-mcd_get(struct mcd_softc *sc, char *buf, int nmax)
-{
- int i,k;
-
- for (i=0; i<nmax; i++) {
- /* wait for data */
- if ((k = mcd_getreply(sc, DELAY_GETREPLY)) < 0) {
- device_printf(sc->dev, "timeout mcd_get\n");
- return (-1);
- }
- buf[i] = k;
- }
- return (i);
-}
-
-static int
-mcd_send(struct mcd_softc *sc, int cmd,int nretrys)
-{
- int i,k=0;
-
-/*MCD_TRACE("mcd_send: command = 0x%02x\n",cmd,0,0,0);*/
- for (i=0; i<nretrys; i++) {
- MCD_WRITE(sc, MCD_REG_COMMAND, cmd);
- if ((k=mcd_getstat(sc, 0)) != -1)
- break;
- }
- if (k == -2) {
- device_printf(sc->dev, "media changed\n");
- return (-1);
- }
- if (i == nretrys) {
- device_printf(sc->dev, "mcd_send retry cnt exceeded\n");
- return (-1);
- }
-/*MCD_TRACE("mcd_send: done\n",0,0,0,0);*/
- return (0);
-}
-
-static void
-hsg2msf(int hsg, bcd_t *msf)
-{
- hsg += 150;
- F_msf(msf) = bin2bcd(hsg % 75);
- hsg /= 75;
- S_msf(msf) = bin2bcd(hsg % 60);
- hsg /= 60;
- M_msf(msf) = bin2bcd(hsg);
-}
-
-static int
-msf2hsg(bcd_t *msf, int relative)
-{
- return (bcd2bin(M_msf(msf)) * 60 + bcd2bin(S_msf(msf))) * 75 +
- bcd2bin(F_msf(msf)) - (!relative) * 150;
-}
-
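
hsg2msf()/msf2hsg() above convert between linear block numbers and the
BCD-coded minute:second:frame addresses the drive speaks: 75 frames per
second, 60 seconds per minute, offset by the 150-frame (2 second) pregap. A
userspace round-trip check of the arithmetic (bin2bcd()/bcd2bin() are
reimplemented locally for two-digit values):

	#include <stdio.h>

	static unsigned bin2bcd(unsigned x) { return (((x / 10) << 4) | (x % 10)); }
	static unsigned bcd2bin(unsigned x) { return ((x >> 4) * 10 + (x & 0x0f)); }

	int
	main(void)
	{
		int hsg = 16000, back;
		unsigned m, s, f;

		hsg += 150;			/* skip the 2-second pregap */
		f = bin2bcd(hsg % 75); hsg /= 75;
		s = bin2bcd(hsg % 60); hsg /= 60;
		m = bin2bcd(hsg);
		back = (bcd2bin(m) * 60 + bcd2bin(s)) * 75 + bcd2bin(f) - 150;
		printf("msf %02x:%02x:%02x -> hsg %d\n", m, s, f, back);
		return (0);
	}
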
-static int
-mcd_volinfo(struct mcd_softc *sc)
-{
-
- /* Just return if we already have it */
- if (sc->data.flags & MCDVOLINFO) return (0);
-
-/*MCD_TRACE("mcd_volinfo: enter\n",0,0,0,0);*/
-
- /* send volume info command */
- if (mcd_send(sc, MCD_CMDGETVOLINFO,MCD_RETRYS) < 0)
- return (EIO);
-
- /* get data */
- if (mcd_get(sc, (char*) &sc->data.volinfo,sizeof(struct mcd_volinfo)) < 0) {
- device_printf(sc->dev, "mcd_volinfo: error read data\n");
- return (EIO);
- }
-
- if (sc->data.volinfo.trk_low > 0 &&
- sc->data.volinfo.trk_high >= sc->data.volinfo.trk_low
- ) {
- sc->data.flags |= MCDVOLINFO; /* volinfo is OK */
- return (0);
- }
-
- return (EINVAL);
-}
-
-/* state machine to process read requests
- * initialize with MCD_S_BEGIN: calculate sizes, and read status
- * MCD_S_WAITSTAT: wait for status reply, set mode
- * MCD_S_WAITMODE: waits for status reply from set mode, set read command
- * MCD_S_WAITREAD: wait for read ready, read data
- */
-static void
-mcd_timeout(void *arg)
-{
- struct mcd_softc *sc;
-
- sc = (struct mcd_softc *)arg;
-
- MCD_ASSERT_LOCKED(sc);
- mcd_doread(sc, sc->ch_state, sc->ch_mbxsave);
-}
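
The four states named in the comment above drive reads without blocking: each
step issues a command, then reschedules itself via callout_reset() until the
drive's flag register reports status or data ready. The bare progression,
minus the hardware polling, retries, and timeouts:

	#include <stdio.h>

	enum state { S_BEGIN, S_WAITSTAT, S_WAITMODE, S_WAITREAD, S_DONE };

	int
	main(void)
	{
		enum state st = S_BEGIN;

		while (st != S_DONE) {
			switch (st) {
			case S_BEGIN:
				printf("issue GETSTAT\n");
				st = S_WAITSTAT;
				break;
			case S_WAITSTAT:
				printf("status ready, set read mode\n");
				st = S_WAITMODE;
				break;
			case S_WAITMODE:
				printf("mode set, issue read\n");
				st = S_WAITREAD;
				break;
			case S_WAITREAD:
				printf("data ready, copy sector\n");
				st = S_DONE;
				break;
			default:
				st = S_DONE;
				break;
			}
		}
		return (0);
	}
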
-
-static void
-mcd_doread(struct mcd_softc *sc, int state, struct mcd_mbx *mbxin)
-{
- struct mcd_mbx *mbx;
- struct bio *bp;
- int rm, i, k;
- struct mcd_read2 rbuf;
- int blknum;
- caddr_t addr;
-
- MCD_ASSERT_LOCKED(sc);
- mbx = (state!=MCD_S_BEGIN) ? sc->ch_mbxsave : mbxin;
- bp = mbx->bp;
-
-loop:
- switch (state) {
- case MCD_S_BEGIN:
- mbx = sc->ch_mbxsave = mbxin;
-
- case MCD_S_BEGIN1:
-retry_status:
- /* get status */
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDGETSTAT);
- mbx->count = RDELAY_WAITSTAT;
- sc->ch_state = MCD_S_WAITSTAT;
- callout_reset(&sc->timer, hz/100, mcd_timeout, sc); /* XXX */
- return;
- case MCD_S_WAITSTAT:
- sc->ch_state = MCD_S_WAITSTAT;
- callout_stop(&sc->timer);
- if (mbx->count-- >= 0) {
- if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL) {
- sc->ch_state = MCD_S_WAITSTAT;
- callout_reset(&sc->timer, hz/100,
- mcd_timeout, sc); /* XXX */
- return;
- }
- sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF;
- if (sc->data.status & MCD_ST_CMDCHECK)
- goto retry_status;
- if (mcd_setflags(sc) < 0)
- goto changed;
- MCD_TRACE("got WAITSTAT delay=%d\n",
- RDELAY_WAITSTAT-mbx->count);
- /* reject, if audio active */
- if (sc->data.status & MCDAUDIOBSY) {
- device_printf(sc->dev, "audio is active\n");
- goto readerr;
- }
-
-retry_mode:
- /* to check for raw/cooked mode */
- if (sc->data.flags & MCDREADRAW) {
- rm = MCD_MD_RAW;
- mbx->sz = MCDRBLK;
- } else {
- rm = MCD_MD_COOKED;
- mbx->sz = sc->data.blksize;
- }
-
- if (rm == sc->data.curr_mode)
- goto modedone;
-
- mbx->count = RDELAY_WAITMODE;
-
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- mbx->mode = rm;
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDSETMODE);
- MCD_WRITE(sc, MCD_REG_COMMAND, rm);
-
- sc->ch_state = MCD_S_WAITMODE;
- callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */
- return;
- } else {
- device_printf(sc->dev, "timeout getstatus\n");
- goto readerr;
- }
-
- case MCD_S_WAITMODE:
- sc->ch_state = MCD_S_WAITMODE;
- callout_stop(&sc->timer);
- if (mbx->count-- < 0) {
- device_printf(sc->dev, "timeout set mode\n");
- goto readerr;
- }
- if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL) {
- sc->ch_state = MCD_S_WAITMODE;
- callout_reset(&sc->timer, hz / 100, mcd_timeout, sc);
- return;
- }
- sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF;
- if (sc->data.status & MCD_ST_CMDCHECK) {
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- goto retry_mode;
- }
- if (mcd_setflags(sc) < 0)
- goto changed;
- sc->data.curr_mode = mbx->mode;
- MCD_TRACE("got WAITMODE delay=%d\n",
- RDELAY_WAITMODE-mbx->count);
-modedone:
- /* for first block */
- mbx->nblk = howmany(bp->bio_bcount, mbx->sz);
- mbx->skip = 0;
-
-nextblock:
- blknum = bp->bio_offset / mbx->sz + mbx->skip/mbx->sz;
-
- MCD_TRACE("mcd_doread: read blknum=%d for bp=%p\n",
- blknum, bp);
-
- /* build parameter block */
- hsg2msf(blknum,rbuf.start_msf);
-retry_read:
- /* send the read command */
- MCD_WRITE(sc, MCD_REG_COMMAND, sc->data.read_command);
- MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[0]);
- MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[1]);
- MCD_WRITE(sc, MCD_REG_COMMAND, rbuf.start_msf[2]);
- MCD_WRITE(sc, MCD_REG_COMMAND, 0);
- MCD_WRITE(sc, MCD_REG_COMMAND, 0);
- MCD_WRITE(sc, MCD_REG_COMMAND, 1);
-
- /* Spin briefly (<= 2ms) to avoid missing next block */
- for (i = 0; i < 20; i++) {
- k = MCD_READ(sc, MCD_FLAGS);
- if (!(k & MFL_DATA_NOT_AVAIL))
- goto got_it;
- DELAY(100);
- }
-
- mbx->count = RDELAY_WAITREAD;
- sc->ch_state = MCD_S_WAITREAD;
- callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */
- return;
- case MCD_S_WAITREAD:
- sc->ch_state = MCD_S_WAITREAD;
- callout_stop(&sc->timer);
- if (mbx->count-- > 0) {
- k = MCD_READ(sc, MCD_FLAGS);
- if (!(k & MFL_DATA_NOT_AVAIL)) { /* XXX */
- MCD_TRACE("got data delay=%d\n",
- RDELAY_WAITREAD-mbx->count);
- got_it:
- /* data is ready */
- addr = bp->bio_data + mbx->skip;
-
- MCD_WRITE(sc, MCD_REG_CTL2,0x04); /* XXX */
- for (i=0; i<mbx->sz; i++)
- *addr++ = MCD_READ(sc, MCD_REG_RDATA);
- MCD_WRITE(sc, MCD_REG_CTL2,0x0c); /* XXX */
-
- k = MCD_READ(sc, MCD_FLAGS);
- /* If we still have some junk, read it too */
- if (!(k & MFL_DATA_NOT_AVAIL)) {
- MCD_WRITE(sc, MCD_REG_CTL2, 0x04); /* XXX */
- (void)MCD_READ(sc, MCD_REG_RDATA);
- (void)MCD_READ(sc, MCD_REG_RDATA);
- MCD_WRITE(sc, MCD_REG_CTL2, 0x0c); /* XXX */
- }
-
- if (--mbx->nblk > 0) {
- mbx->skip += mbx->sz;
- goto nextblock;
- }
-
- /* return buffer */
- bp->bio_resid = 0;
- biodone(bp);
-
- sc->data.flags &= ~(MCDMBXBSY|MCDREADRAW);
- mcd_start(sc);
- return;
- }
- if (!(k & MFL_STATUS_NOT_AVAIL)) {
- sc->data.status = MCD_READ(sc, MCD_REG_STATUS) & 0xFF;
- if (sc->data.status & MCD_ST_CMDCHECK)
- goto retry_read;
- if (mcd_setflags(sc) < 0)
- goto changed;
- }
- sc->ch_state = MCD_S_WAITREAD;
- callout_reset(&sc->timer, hz / 100, mcd_timeout, sc); /* XXX */
- return;
- } else {
- device_printf(sc->dev, "timeout read data\n");
- goto readerr;
- }
- }
-
-readerr:
- if (mbx->retry-- > 0) {
- device_printf(sc->dev, "retrying\n");
- state = MCD_S_BEGIN1;
- goto loop;
- }
-harderr:
- /* invalidate the buffer */
- bp->bio_flags |= BIO_ERROR;
- bp->bio_resid = bp->bio_bcount;
- biodone(bp);
-
- sc->data.flags &= ~(MCDMBXBSY|MCDREADRAW);
- mcd_start(sc);
- return;
-
-changed:
- device_printf(sc->dev, "media changed\n");
- goto harderr;
-
-#ifdef NOTDEF
- device_printf(sc->dev, "unit timeout, resetting\n");
- MCD_WRITE(sc, MCD_REG_RESET, MCD_CMDRESET);
- DELAY(300000);
- (void)mcd_getstat(sc, 1);
- (void)mcd_getstat(sc, 1);
- /*sc->data.status &= ~MCDDSKCHNG; */
- sc->data.debug = 1; /* preventive set debug mode */
-
-#endif
-
-}
-
-static int
-mcd_lock_door(struct mcd_softc *sc, int lock)
-{
-
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDLOCKDRV);
- MCD_WRITE(sc, MCD_REG_COMMAND, lock);
- if (mcd_getstat(sc, 0) == -1)
- return (EIO);
- return (0);
-}
-
-static int
-mcd_close_tray(struct mcd_softc *sc)
-{
- int retry, r;
-
- if (mcd_getstat(sc, 1) == -1)
- return (EIO);
- if (sc->data.status & MCDDOOROPEN) {
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDCLOSETRAY);
- for (retry = 0; retry < CLOSE_TRAY_SECS * WAIT_FRAC; retry++) {
- if (MCD_READ(sc, MCD_FLAGS) & MFL_STATUS_NOT_AVAIL)
- (void) mtx_sleep(sc, &sc->mtx, PSOCK | PCATCH,
- "mcdcls", hz/WAIT_FRAC);
- else {
- if ((r = mcd_getstat(sc, 0)) == -1)
- return (EIO);
- return (0);
- }
- }
- return (ENXIO);
- }
- return (0);
-}
-
-static int
-mcd_eject(struct mcd_softc *sc)
-{
- int r;
-
- if (mcd_getstat(sc, 1) == -1) /* detect disk change too */
- return (EIO);
- if (sc->data.status & MCDDOOROPEN)
- return (0);
- if ((r = mcd_stop(sc)) == EIO)
- return (r);
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDEJECTDISK);
- if (mcd_getstat(sc, 0) == -1)
- return (EIO);
- return (0);
-}
-
-static int
-mcd_inject(struct mcd_softc *sc)
-{
-
- if (mcd_getstat(sc, 1) == -1) /* detect disk change too */
- return (EIO);
- if (sc->data.status & MCDDOOROPEN)
- return mcd_close_tray(sc);
- return (0);
-}
-
-static int
-mcd_hard_reset(struct mcd_softc *sc)
-{
-
- MCD_WRITE(sc, MCD_REG_RESET, MCD_CMDRESET);
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- sc->data.audio_status = CD_AS_AUDIO_INVALID;
- return (0);
-}
-
-static void
-mcd_soft_reset(struct mcd_softc *sc)
-{
-
- sc->data.flags &= (MCDINIT|MCDPROBING|MCDNEWMODEL);
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- sc->data.partflags = 0;
- sc->data.audio_status = CD_AS_AUDIO_INVALID;
-}
-
-static int
-mcd_setmode(struct mcd_softc *sc, int mode)
-{
- int retry, st;
-
- if (sc->data.curr_mode == mode)
- return (0);
- if (sc->data.debug)
- device_printf(sc->dev, "setting mode to %d\n", mode);
- for(retry=0; retry<MCD_RETRYS; retry++)
- {
- sc->data.curr_mode = MCD_MD_UNKNOWN;
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDSETMODE);
- MCD_WRITE(sc, MCD_REG_COMMAND, mode);
- if ((st = mcd_getstat(sc, 0)) >= 0) {
- sc->data.curr_mode = mode;
- return (0);
- }
- if (st == -2) {
- device_printf(sc->dev, "media changed\n");
- break;
- }
- }
-
- return (-1);
-}
-
-static int
-mcd_toc_header(struct mcd_softc *sc, struct ioc_toc_header *th)
-{
- int r;
-
- if ((r = mcd_volinfo(sc)) != 0)
- return (r);
-
- th->starting_track = bcd2bin(sc->data.volinfo.trk_low);
- th->ending_track = bcd2bin(sc->data.volinfo.trk_high);
- th->len = 2 * sizeof(u_char) /* start & end tracks */ +
- (th->ending_track + 1 - th->starting_track + 1) *
- sizeof(struct cd_toc_entry);
-
- return (0);
-}
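
The length computed above counts the two track-number bytes plus one
cd_toc_entry per track and one more for the fake last+1 (lead-out) entry that
mcd_read_toc() appends. For example (the entry size is illustrative):

	#include <stdio.h>

	int
	main(void)
	{
		int starting = 1, ending = 12;
		int entry_size = 8;	/* sizeof(struct cd_toc_entry), assumed */

		/* 2 + (12 + 1 - 1 + 1) * 8 = 2 + 13 * 8 = 106 */
		printf("len = %d\n",
		    2 + (ending + 1 - starting + 1) * entry_size);
		return (0);
	}
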
-
-static int
-mcd_read_toc(struct mcd_softc *sc)
-{
- struct ioc_toc_header th;
- struct mcd_qchninfo q;
- int rc, trk, idx, retry;
-
- /* Only read TOC if needed */
- if (sc->data.flags & MCDTOC)
- return (0);
-
- if (sc->data.debug)
- device_printf(sc->dev, "reading toc header\n");
-
- if ((rc = mcd_toc_header(sc, &th)) != 0)
- return (rc);
-
- if (mcd_send(sc, MCD_CMDSTOPAUDIO, MCD_RETRYS) < 0)
- return (EIO);
-
- if (mcd_setmode(sc, MCD_MD_TOC) != 0)
- return (EIO);
-
- if (sc->data.debug)
- device_printf(sc->dev, "get_toc reading qchannel info\n");
-
- for(trk=th.starting_track; trk<=th.ending_track; trk++)
- sc->data.toc[trk].idx_no = 0;
- trk = th.ending_track - th.starting_track + 1;
- for(retry=0; retry<600 && trk>0; retry++)
- {
- if (mcd_getqchan(sc, &q) < 0) break;
- idx = bcd2bin(q.idx_no);
- if (idx>=th.starting_track && idx<=th.ending_track && q.trk_no==0) {
- if (sc->data.toc[idx].idx_no == 0) {
- sc->data.toc[idx] = q;
- trk--;
- }
- }
- }
-
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- return (EIO);
-
- if (trk != 0)
- return (ENXIO);
-
- /* add a fake last+1 */
- idx = th.ending_track + 1;
- sc->data.toc[idx].control = sc->data.toc[idx-1].control;
- sc->data.toc[idx].addr_type = sc->data.toc[idx-1].addr_type;
- sc->data.toc[idx].trk_no = 0;
- sc->data.toc[idx].idx_no = MCD_LASTPLUS1;
- sc->data.toc[idx].hd_pos_msf[0] = sc->data.volinfo.vol_msf[0];
- sc->data.toc[idx].hd_pos_msf[1] = sc->data.volinfo.vol_msf[1];
- sc->data.toc[idx].hd_pos_msf[2] = sc->data.volinfo.vol_msf[2];
-
- if (sc->data.debug)
- { int i;
- for (i = th.starting_track; i <= idx; i++)
- device_printf(sc->dev, "trk %d idx %d pos %d %d %d\n",
- i,
- sc->data.toc[i].idx_no > 0x99 ? sc->data.toc[i].idx_no :
- bcd2bin(sc->data.toc[i].idx_no),
- bcd2bin(sc->data.toc[i].hd_pos_msf[0]),
- bcd2bin(sc->data.toc[i].hd_pos_msf[1]),
- bcd2bin(sc->data.toc[i].hd_pos_msf[2]));
- }
-
- sc->data.flags |= MCDTOC;
-
- return (0);
-}
-
-#if 0
-static int
-mcd_toc_entry(struct mcd_softc *sc, struct ioc_read_toc_single_entry *te)
-{
- struct ioc_toc_header th;
- int rc, trk;
-
- if (te->address_format != CD_MSF_FORMAT
- && te->address_format != CD_LBA_FORMAT)
- return (EINVAL);
-
- /* Copy the toc header */
- if ((rc = mcd_toc_header(sc, &th)) != 0)
- return (rc);
-
- /* verify starting track */
- trk = te->track;
- if (trk == 0)
- trk = th.starting_track;
- else if (trk == MCD_LASTPLUS1)
- trk = th.ending_track + 1;
- else if (trk < th.starting_track || trk > th.ending_track + 1)
- return (EINVAL);
-
- /* Make sure we have a valid toc */
- if ((rc=mcd_read_toc(sc)) != 0)
- return (rc);
-
- /* Copy the TOC data. */
- if (sc->data.toc[trk].idx_no == 0)
- return (EIO);
-
- te->entry.control = sc->data.toc[trk].control;
- te->entry.addr_type = sc->data.toc[trk].addr_type;
- te->entry.track =
- sc->data.toc[trk].idx_no > 0x99 ? sc->data.toc[trk].idx_no :
- bcd2bin(sc->data.toc[trk].idx_no);
- switch (te->address_format) {
- case CD_MSF_FORMAT:
- te->entry.addr.msf.unused = 0;
- te->entry.addr.msf.minute = bcd2bin(sc->data.toc[trk].hd_pos_msf[0]);
- te->entry.addr.msf.second = bcd2bin(sc->data.toc[trk].hd_pos_msf[1]);
- te->entry.addr.msf.frame = bcd2bin(sc->data.toc[trk].hd_pos_msf[2]);
- break;
- case CD_LBA_FORMAT:
- te->entry.addr.lba = htonl(msf2hsg(sc->data.toc[trk].hd_pos_msf, 0));
- break;
- }
- return (0);
-}
-#endif
-
-static int
-mcd_toc_entrys(struct mcd_softc *sc, struct ioc_read_toc_entry *te)
-{
- struct cd_toc_entry entries[MCD_MAXTOCS];
- struct ioc_toc_header th;
- int rc, n, trk, len;
-
- if ( te->data_len < sizeof(entries[0])
- || (te->data_len % sizeof(entries[0])) != 0
- || (te->address_format != CD_MSF_FORMAT
- && te->address_format != CD_LBA_FORMAT)
- )
- return (EINVAL);
-
- /* Copy the toc header */
- if ((rc = mcd_toc_header(sc, &th)) != 0)
- return (rc);
-
- /* verify starting track */
- trk = te->starting_track;
- if (trk == 0)
- trk = th.starting_track;
- else if (trk == MCD_LASTPLUS1)
- trk = th.ending_track + 1;
- else if (trk < th.starting_track || trk > th.ending_track + 1)
- return (EINVAL);
-
- len = ((th.ending_track + 1 - trk) + 1) *
- sizeof(entries[0]);
- if (te->data_len < len)
- len = te->data_len;
- if (len > sizeof(entries))
- return (EINVAL);
-
- /* Make sure we have a valid toc */
- if ((rc=mcd_read_toc(sc)) != 0)
- return (rc);
-
- /* Copy the TOC data. */
- for (n = 0; len > 0 && trk <= th.ending_track + 1; trk++) {
- if (sc->data.toc[trk].idx_no == 0)
- continue;
- entries[n].control = sc->data.toc[trk].control;
- entries[n].addr_type = sc->data.toc[trk].addr_type;
- entries[n].track =
- sc->data.toc[trk].idx_no > 0x99 ? sc->data.toc[trk].idx_no :
- bcd2bin(sc->data.toc[trk].idx_no);
- switch (te->address_format) {
- case CD_MSF_FORMAT:
- entries[n].addr.msf.unused = 0;
- entries[n].addr.msf.minute = bcd2bin(sc->data.toc[trk].hd_pos_msf[0]);
- entries[n].addr.msf.second = bcd2bin(sc->data.toc[trk].hd_pos_msf[1]);
- entries[n].addr.msf.frame = bcd2bin(sc->data.toc[trk].hd_pos_msf[2]);
- break;
- case CD_LBA_FORMAT:
- entries[n].addr.lba = htonl(msf2hsg(sc->data.toc[trk].hd_pos_msf, 0));
- break;
- }
- len -= sizeof(struct cd_toc_entry);
- n++;
- }
-
- /* copy the data back */
- MCD_UNLOCK(sc);
- return copyout(entries, te->data, n * sizeof(struct cd_toc_entry));
-}
-
-static int
-mcd_stop(struct mcd_softc *sc)
-{
-
- /* Verify current status */
- if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS &&
- sc->data.audio_status != CD_AS_PLAY_PAUSED &&
- sc->data.audio_status != CD_AS_PLAY_COMPLETED) {
- if (sc->data.debug)
- device_printf(sc->dev,
- "stop attempted when not playing, audio status %d\n",
- sc->data.audio_status);
- return (EINVAL);
- }
- if (sc->data.audio_status == CD_AS_PLAY_IN_PROGRESS)
- if (mcd_send(sc, MCD_CMDSTOPAUDIO, MCD_RETRYS) < 0)
- return (EIO);
- sc->data.audio_status = CD_AS_PLAY_COMPLETED;
- return (0);
-}
-
-static int
-mcd_getqchan(struct mcd_softc *sc, struct mcd_qchninfo *q)
-{
-
- if (mcd_send(sc, MCD_CMDGETQCHN, MCD_RETRYS) < 0)
- return (-1);
- if (mcd_get(sc, (char *) q, sizeof(struct mcd_qchninfo)) < 0)
- return (-1);
- if (sc->data.debug) {
- device_printf(sc->dev,
- "getqchan control=0x%x addr_type=0x%x trk=%d ind=%d ttm=%d:%d.%d dtm=%d:%d.%d\n",
- q->control, q->addr_type,
- bcd2bin(q->trk_no),
- bcd2bin(q->idx_no),
- bcd2bin(q->trk_size_msf[0]),
- bcd2bin(q->trk_size_msf[1]),
- bcd2bin(q->trk_size_msf[2]),
- bcd2bin(q->hd_pos_msf[0]),
- bcd2bin(q->hd_pos_msf[1]),
- bcd2bin(q->hd_pos_msf[2]));
- }
- return (0);
-}
-
-static int
-mcd_subchan(struct mcd_softc *sc, struct ioc_read_subchannel *sch, int nocopyout)
-{
- struct mcd_qchninfo q;
- struct cd_sub_channel_info data;
- int lba;
-
- if (sc->data.debug)
- device_printf(sc->dev, "subchan af=%d, df=%d\n",
- sch->address_format,
- sch->data_format);
-
- if (sch->address_format != CD_MSF_FORMAT &&
- sch->address_format != CD_LBA_FORMAT)
- return (EINVAL);
-
- if (sch->data_format != CD_CURRENT_POSITION &&
- sch->data_format != CD_MEDIA_CATALOG)
- return (EINVAL);
-
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- return (EIO);
-
- if (mcd_getqchan(sc, &q) < 0)
- return (EIO);
-
- data.header.audio_status = sc->data.audio_status;
- data.what.position.data_format = sch->data_format;
-
- switch (sch->data_format) {
- case CD_MEDIA_CATALOG:
- data.what.media_catalog.mc_valid = 1;
- data.what.media_catalog.mc_number[0] = '\0';
- break;
-
- case CD_CURRENT_POSITION:
- data.what.position.control = q.control;
- data.what.position.addr_type = q.addr_type;
- data.what.position.track_number = bcd2bin(q.trk_no);
- data.what.position.index_number = bcd2bin(q.idx_no);
- switch (sch->address_format) {
- case CD_MSF_FORMAT:
- data.what.position.reladdr.msf.unused = 0;
- data.what.position.reladdr.msf.minute = bcd2bin(q.trk_size_msf[0]);
- data.what.position.reladdr.msf.second = bcd2bin(q.trk_size_msf[1]);
- data.what.position.reladdr.msf.frame = bcd2bin(q.trk_size_msf[2]);
- data.what.position.absaddr.msf.unused = 0;
- data.what.position.absaddr.msf.minute = bcd2bin(q.hd_pos_msf[0]);
- data.what.position.absaddr.msf.second = bcd2bin(q.hd_pos_msf[1]);
- data.what.position.absaddr.msf.frame = bcd2bin(q.hd_pos_msf[2]);
- break;
- case CD_LBA_FORMAT:
- lba = msf2hsg(q.trk_size_msf, 1);
- /*
- * Pre-gap has index number of 0, and decreasing MSF
- * address. Must be converted to negative LBA, per
- * SCSI spec.
- */
- if (data.what.position.index_number == 0)
- lba = -lba;
- data.what.position.reladdr.lba = htonl(lba);
- data.what.position.absaddr.lba = htonl(msf2hsg(q.hd_pos_msf, 0));
- break;
- }
- break;
- }
-
- MCD_UNLOCK(sc);
- if (nocopyout == 0)
- return copyout(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len));
- bcopy(&data, sch->data, min(sizeof(struct cd_sub_channel_info), sch->data_len));
- return (0);
-}
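
The CD_LBA_FORMAT branch encodes the pregap rule called out in the comment
above: index 0 addresses count down toward the track start, so the relative
LBA is negated before the htonl() byte swap. A quick standalone check of that
encoding (the frame count is made up):

	#include <stdio.h>
	#include <stdint.h>
	#include <arpa/inet.h>		/* htonl() */

	int
	main(void)
	{
		int index_number = 0;	/* 0 = pregap */
		int32_t lba = 112;	/* frames of pregap remaining */

		if (index_number == 0)
			lba = -lba;
		printf("reladdr.lba = 0x%08x\n",
		    (unsigned)htonl((uint32_t)lba));
		return (0);
	}
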
-
-static int
-mcd_playmsf(struct mcd_softc *sc, struct ioc_play_msf *p)
-{
- struct mcd_read2 pb;
-
- if (sc->data.debug)
- device_printf(sc->dev, "playmsf: from %d:%d.%d to %d:%d.%d\n",
- p->start_m, p->start_s, p->start_f,
- p->end_m, p->end_s, p->end_f);
-
- if ((p->start_m * 60 * 75 + p->start_s * 75 + p->start_f) >=
- (p->end_m * 60 * 75 + p->end_s * 75 + p->end_f) ||
- (p->end_m * 60 * 75 + p->end_s * 75 + p->end_f) >
- M_msf(sc->data.volinfo.vol_msf) * 60 * 75 +
- S_msf(sc->data.volinfo.vol_msf) * 75 +
- F_msf(sc->data.volinfo.vol_msf))
- return (EINVAL);
-
- pb.start_msf[0] = bin2bcd(p->start_m);
- pb.start_msf[1] = bin2bcd(p->start_s);
- pb.start_msf[2] = bin2bcd(p->start_f);
- pb.end_msf[0] = bin2bcd(p->end_m);
- pb.end_msf[1] = bin2bcd(p->end_s);
- pb.end_msf[2] = bin2bcd(p->end_f);
-
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- return (EIO);
-
- return mcd_play(sc, &pb);
-}
-
-static int
-mcd_playtracks(struct mcd_softc *sc, struct ioc_play_track *pt)
-{
- struct mcd_read2 pb;
- int a = pt->start_track;
- int z = pt->end_track;
- int rc, i;
-
- if ((rc = mcd_read_toc(sc)) != 0)
- return (rc);
-
- if (sc->data.debug)
- device_printf(sc->dev, "playtracks from %d:%d to %d:%d\n",
- a, pt->start_index, z, pt->end_index);
-
- if ( a < bcd2bin(sc->data.volinfo.trk_low)
- || a > bcd2bin(sc->data.volinfo.trk_high)
- || a > z
- || z < bcd2bin(sc->data.volinfo.trk_low)
- || z > bcd2bin(sc->data.volinfo.trk_high))
- return (EINVAL);
-
- for (i = 0; i < 3; i++) {
- pb.start_msf[i] = sc->data.toc[a].hd_pos_msf[i];
- pb.end_msf[i] = sc->data.toc[z+1].hd_pos_msf[i];
- }
-
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- return (EIO);
-
- return mcd_play(sc, &pb);
-}
-
-static int
-mcd_playblocks(struct mcd_softc *sc, struct ioc_play_blocks *p)
-{
- struct mcd_read2 pb;
-
- if (sc->data.debug)
- device_printf(sc->dev, "playblocks: blkno %d length %d\n",
- p->blk, p->len);
-
- if (p->blk > sc->data.disksize || p->len > sc->data.disksize ||
- p->blk < 0 || p->len < 0 ||
- (p->blk + p->len) > sc->data.disksize)
- return (EINVAL);
-
- hsg2msf(p->blk, pb.start_msf);
- hsg2msf(p->blk + p->len, pb.end_msf);
-
- if (mcd_setmode(sc, MCD_MD_COOKED) != 0)
- return (EIO);
-
- return mcd_play(sc, &pb);
-}
-
-static int
-mcd_play(struct mcd_softc *sc, struct mcd_read2 *pb)
-{
- int retry, st = -1, status;
-
- sc->data.lastpb = *pb;
- for(retry=0; retry<MCD_RETRYS; retry++) {
-
- critical_enter();
- MCD_WRITE(sc, MCD_REG_COMMAND, MCD_CMDSINGLESPEEDREAD);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->start_msf[0]);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->start_msf[1]);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->start_msf[2]);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[0]);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[1]);
- MCD_WRITE(sc, MCD_REG_COMMAND, pb->end_msf[2]);
- critical_exit();
-
- status=mcd_getstat(sc, 0);
- if (status == -1)
- continue;
- else if (status != -2)
- st = 0;
- break;
- }
-
- if (status == -2) {
- device_printf(sc->dev, "media changed\n");
- return (ENXIO);
- }
- if (sc->data.debug)
- device_printf(sc->dev,
- "mcd_play retry=%d, status=0x%02x\n", retry, status);
- if (st < 0)
- return (ENXIO);
- sc->data.audio_status = CD_AS_PLAY_IN_PROGRESS;
- return (0);
-}
-
-static int
-mcd_pause(struct mcd_softc *sc)
-{
- struct mcd_qchninfo q;
- int rc;
-
- /* Verify current status */
- if (sc->data.audio_status != CD_AS_PLAY_IN_PROGRESS &&
- sc->data.audio_status != CD_AS_PLAY_PAUSED) {
- if (sc->data.debug)
- device_printf(sc->dev,
- "pause attempted when not playing, audio status %d\n",
- sc->data.audio_status);
- return (EINVAL);
- }
-
- /* Get the current position */
- if (mcd_getqchan(sc, &q) < 0)
- return (EIO);
-
- /* Copy it into lastpb */
- sc->data.lastpb.start_msf[0] = q.hd_pos_msf[0];
- sc->data.lastpb.start_msf[1] = q.hd_pos_msf[1];
- sc->data.lastpb.start_msf[2] = q.hd_pos_msf[2];
-
- /* Stop playing */
- if ((rc=mcd_stop(sc)) != 0)
- return (rc);
-
- /* Set the proper status and exit */
- sc->data.audio_status = CD_AS_PLAY_PAUSED;
- return (0);
-}
-
-static int
-mcd_resume(struct mcd_softc *sc)
-{
-
- if (sc->data.audio_status != CD_AS_PLAY_PAUSED)
- return (EINVAL);
- return mcd_play(sc, &sc->data.lastpb);
-}
diff --git a/sys/dev/mcd/mcd_isa.c b/sys/dev/mcd/mcd_isa.c
deleted file mode 100644
index 92e8adb57192..000000000000
--- a/sys/dev/mcd/mcd_isa.c
+++ /dev/null
@@ -1,202 +0,0 @@
-/*
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include <sys/param.h>
-#include <sys/systm.h>
-#include <sys/kernel.h>
-#include <sys/module.h>
-#include <sys/conf.h>
-#include <sys/fcntl.h>
-#include <sys/bio.h>
-#include <sys/cdio.h>
-#include <sys/bus.h>
-
-#include <sys/lock.h>
-#include <sys/mutex.h>
-
-#include <machine/bus.h>
-#include <machine/resource.h>
-#include <sys/rman.h>
-
-#include <isa/isavar.h>
-
-#include <dev/mcd/mcdreg.h>
-#include <dev/mcd/mcdvar.h>
-
-static int mcd_isa_probe (device_t);
-static int mcd_isa_attach (device_t);
-static int mcd_isa_detach (device_t);
-
-static int mcd_alloc_resources (device_t);
-static void mcd_release_resources (device_t);
-
-static int
-mcd_isa_probe (device_t dev)
-{
- struct mcd_softc * sc;
- int error;
-
- /* No pnp support */
- if (isa_get_vendorid(dev))
- return (ENXIO);
-
- /* IO port must be configured. */
- if (bus_get_resource_start(dev, SYS_RES_IOPORT, 0) == 0)
- return (ENXIO);
-
- sc = device_get_softc(dev);
- sc->dev = dev;
- sc->port_rid = 0;
- sc->port_type = SYS_RES_IOPORT;
- error = mcd_alloc_resources(dev);
- if (error)
- goto fail;
-
- error = mcd_probe(sc);
- if (error) {
- device_printf(dev, "Probe failed.\n");
- goto fail;
- }
-
- device_set_desc(dev, sc->data.name);
-
-fail:
- mcd_release_resources(dev);
- return (error);
-}
-
-static int
-mcd_isa_attach (device_t dev)
-{
- struct mcd_softc * sc;
- int error;
-
- sc = device_get_softc(dev);
- error = 0;
-
- sc->dev = dev;
- sc->port_rid = 0;
- sc->port_type = SYS_RES_IOPORT;
- error = mcd_alloc_resources(dev);
- if (error)
- goto fail;
-
- error = mcd_probe(sc);
- if (error) {
- device_printf(dev, "Re-Probe failed.\n");
- goto fail;
- }
-
- error = mcd_attach(sc);
- if (error) {
- device_printf(dev, "Attach failed.\n");
- goto fail;
- }
-
- return (0);
-fail:
- mcd_release_resources(dev);
- return (error);
-}
-
-static int
-mcd_isa_detach (device_t dev)
-{
- struct mcd_softc * sc;
- int error;
-
- sc = device_get_softc(dev);
- error = 0;
-
- destroy_dev(sc->mcd_dev_t);
-
- mcd_release_resources(dev);
-
- return (error);
-}
-
-static int
-mcd_alloc_resources (device_t dev)
-{
- struct mcd_softc * sc;
- int error;
-
- sc = device_get_softc(dev);
- error = 0;
- mtx_init(&sc->mtx, "mcd", NULL, MTX_DEF);
-
- if (sc->port_type) {
- sc->port = bus_alloc_resource_any(dev, sc->port_type,
- &sc->port_rid, RF_ACTIVE);
- if (sc->port == NULL) {
- device_printf(dev, "Unable to allocate PORT resource.\n");
- error = ENOMEM;
- goto bad;
- }
- }
-
- if (sc->irq_type) {
- sc->irq = bus_alloc_resource_any(dev, sc->irq_type,
- &sc->irq_rid, RF_ACTIVE);
- if (sc->irq == NULL) {
- device_printf(dev, "Unable to allocate IRQ resource.\n");
- error = ENOMEM;
- goto bad;
- }
- }
-
- if (sc->drq_type) {
- sc->drq = bus_alloc_resource_any(dev, sc->drq_type,
- &sc->drq_rid, RF_ACTIVE);
- if (sc->drq == NULL) {
- device_printf(dev, "Unable to allocate DRQ resource.\n");
- error = ENOMEM;
- goto bad;
- }
- }
-
-bad:
- return (error);
-}
-
-static void
-mcd_release_resources (device_t dev)
-{
- struct mcd_softc * sc;
-
- sc = device_get_softc(dev);
-
- if (sc->irq_ih)
- bus_teardown_intr(dev, sc->irq, sc->irq_ih);
- if (sc->port)
- bus_release_resource(dev, sc->port_type, sc->port_rid, sc->port);
- if (sc->irq)
- bus_release_resource(dev, sc->irq_type, sc->irq_rid, sc->irq);
- if (sc->drq)
- bus_release_resource(dev, sc->drq_type, sc->drq_rid, sc->drq);
-
- mtx_destroy(&sc->mtx);
-
- return;
-}
-
-static device_method_t mcd_isa_methods[] = {
- DEVMETHOD(device_probe, mcd_isa_probe),
- DEVMETHOD(device_attach, mcd_isa_attach),
- DEVMETHOD(device_detach, mcd_isa_detach),
-
- { 0, 0 }
-};
-
-static driver_t mcd_isa_driver = {
- "mcd",
- mcd_isa_methods,
- sizeof(struct mcd_softc)
-};
-
-static devclass_t mcd_devclass;
-
-DRIVER_MODULE(mcd, isa, mcd_isa_driver, mcd_devclass, NULL, 0);
diff --git a/sys/dev/mcd/mcdreg.h b/sys/dev/mcd/mcdreg.h
deleted file mode 100644
index 451fc5de76c3..000000000000
--- a/sys/dev/mcd/mcdreg.h
+++ /dev/null
@@ -1,219 +0,0 @@
-/*-
- * Copyright 1993 by Holger Veit (data part)
- * Copyright 1993 by Brian Moore (audio part)
- * Changes Copyright 1993 by Gary Clark II
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * This software was developed by Holger Veit and Brian Moore
- * for use with "386BSD" and similar operating systems.
- * "Similar operating systems" includes mainly non-profit oriented
- * systems for research and education, including but not restricted to
- * "NetBSD", "FreeBSD", "Mach" (by CMU).
- * 4. Neither the name of the developer(s) nor the name "386BSD"
- * may be used to endorse or promote products derived from this
- * software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE DEVELOPER(S) ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE DEVELOPER(S) BE
- * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
- * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
- * OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
- * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
- * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
- * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
- * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * This file contains definitions for some cdrom control commands
- * and status codes. This info was "inherited" from the DOS MTMCDE.SYS
- * driver, and is thus not complete (and may even be wrong). Some day
- * the manufacturer or anyone else might provide better documentation,
- * so this file (and the driver) will then have a better quality.
- *
- * $FreeBSD$
- */
-
-#ifndef MCD_H
-#define MCD_H
-
-/* toc */
-#define MCD_MAXTOCS 104 /* from the Linux driver */
-#define MCD_LASTPLUS1 170 /* special toc entry */
-
-typedef unsigned char bcd_t;
-#define M_msf(msf) msf[0]
-#define S_msf(msf) msf[1]
-#define F_msf(msf) msf[2]
-
-/* io lines used */
-#define MCD_IO_BASE 0x300
-
-#define MCD_REG_COMMAND 0
-#define MCD_REG_STATUS 0
-#define MCD_REG_RDATA 0
-
-#define MCD_REG_RESET 1
-#define MCD_REG_CTL2 2 /* XXX Is this right? */
-#define MCD_REG_CONFIG 3
-
-#define MCD_MASK_DMA 0x07 /* bits 2-0 = DMA channel */
-#define MCD_MASK_IRQ 0x70 /* bits 6-4 = INT number */
- /* 001 = int 2,9 */
- /* 010 = int 3 */
- /* 011 = int 5 */
- /* 100 = int 10 */
- /* 101 = int 11 */
-/* flags */
-#define MFL_DATA_NOT_AVAIL 0x02
-#define MFL_STATUS_NOT_AVAIL 0x04
-
-/* New Commands */
-#define M_RESET 0x00
-#define M_PICKLE 0x04
-
-/* ports */
-#define MCD_DATA 0
-#define MCD_FLAGS 1
-#define MCD_CTRL 2
-#define CHANNEL 3 /* XXX ??? */
-
-/* Status bits */
-#define MCD_ST_DOOROPEN 0x80
-#define MCD_ST_DSKIN 0x40
-#define MCD_ST_DSKCHNG 0x20
-#define MCD_ST_SPINNING 0x10
-#define MCD_ST_AUDIODISK 0x08 /* Audio Disk is in */
-#define MCD_ST_BUSY 0x04
-#define MCD_ST_AUDIOBSY 0x02 /* Audio Disk is Playing */
-#define MCD_ST_CMDCHECK 0x01 /* Command error */
-
-/* commands known by the controller */
-#define MCD_CMDRESET 0x00
-#define MCD_CMDGETVOLINFO 0x10 /* gets mcd_volinfo */
-#define MCD_CMDGETDISKINFO 0x11 /* gets mcd_disk information */
-#define MCD_CMDGETQCHN 0x20 /* gets mcd_qchninfo */
-#define MCD_CMDGETSENSE 0x30 /* gets sense info */
-#define MCD_CMDGETSTAT 0x40 /* gets a byte of status */
-
-#define MCD_CMDSETMODE 0x50 /* set transmission mode, needs byte */
-
-#define MCD_MDBIT_TESTMODE 0x80 /* 0 = DATALENGTH setting is valid */
-#define MCD_MDBIT_DATALENGTH 0x40 /* 0 = Read User Data Only */
- /* 1 = Read Raw sectors (2352 bytes) */
-
-#define MCDBLK 2048 /* for cooked mode */
-#define MCDRBLK sizeof(struct mcd_rawsector) /* for raw mode */
-
-#define MCD_MDBIT_ECCMODE 0x20 /* 0 = Use secondary correction */
- /* 1 = Don't use secondary ECC */
-#define MCD_MDBIT_SPINDOWN 0x08 /* 0 = Spin Up, 1 = Spin Down */
-#define MCD_MDBIT_GET_TOC 0x04 /* 0 = Get UPC on next GETQCHAN */
- /* 1 = Get TOC on GETQCHAN */
-#define MCD_MDBIT_MUTEDATA 0x01 /* 1 = Don't play back Data as audio */
-
-#define MCD_MD_RAW (MCD_MDBIT_DATALENGTH|MCD_MDBIT_ECCMODE|MCD_MDBIT_MUTEDATA)
-#define MCD_MD_COOKED (MCD_MDBIT_MUTEDATA)
-#define MCD_MD_TOC (MCD_MDBIT_GET_TOC|MCD_MDBIT_MUTEDATA)
-
-#define MCD_CMDSTOPAUDIO 0x70
-#define MCD_CMDSTOPAUDIOTIME 0x80
-#define MCD_CMDGETVOLUME 0x8E /* gets mcd_volume */
-#define MCD_CMDSETDRIVEMODE 0xA0 /* Set drive mode */
-#define MCD_READUPC 0xA2 /* Get UPC info */
-#define MCD_CMDSETVOLUME 0xAE /* sets mcd_volume */
-#define MCD_CMDREAD1 0xB0 /* read n sectors */
-#define MCD_CMDSINGLESPEEDREAD 0xC0 /* read from-to */
-#define MCD_CMDSTARTAUDIOMSF 0xC1 /* read audio data */
-#define MCD_CMDDOUBLESPEEDREAD 0xC1 /* Read lots of data from the drive */
-#define MCD_CMDGETDRIVEMODE 0xC2 /* Get the drive mode */
-#define MCD_CMDREAD 0xC3 /* Read data from the drive */
-#define MCD_CMDSETINTERLEAVE 0xC8 /* Adjust the interleave */
-#define MCD_CMDCONTINFO 0xDC /* Get controller info */
-#define MCD_CMDSTOP 0xF0 /* Stop everything */
-#define MCD_CMDEJECTDISK 0xF6
-#define MCD_CMDCLOSETRAY 0xF8
-
-#define MCD_CMDLOCKDRV 0xFE /* needs byte */
-#define MCD_LK_UNLOCK 0x00
-#define MCD_LK_LOCK 0x01
-#define MCD_LK_TEST 0x02
-
-/* DMA Enable Stuff */
-#define MCD_DMA_IRQFLAGS 0x10 /* Set data0 for IRQ click */
-
-#define MCD_DMA_PREIRQ 0x01 /* All of these are for */
-#define MCD_DMA_POSTIRQ 0x02 /* MCD_DMA_IRQFLAG... */
-#define MCD_DMA_ERRIRQ 0x04 /* */
-
-#define MCD_DMA_TIMEOUT 0x08 /* Set data0 for DMA timeout */
-#define MCD_DMA_UPCFLAG 0x04 /* 1 = Next command will be READUPC */
-
-#define MCD_DMA_DMAMODE 0x02 /* 1 = Data uses DMA */
-#define MCD_DMA_TRANSFERLENGTH 0x01 /* data0 = MSB, data1 = LSB of block length */
-
-struct mcd_dma_mode {
- u_char dma_mode;
- u_char data0; /* If dma_mode & 0x10: Use IRQ settings */
- u_char data1; /* Used if dma_mode & 0x01 */
-} __packed;
-
-struct mcd_volinfo {
- bcd_t trk_low;
- bcd_t trk_high;
- bcd_t vol_msf[3];
- bcd_t trk1_msf[3];
-} __packed;
-
-struct mcd_qchninfo {
- u_char addr_type:4;
- u_char control:4;
- u_char trk_no;
- u_char idx_no;
- bcd_t trk_size_msf[3];
- u_char :8;
- bcd_t hd_pos_msf[3];
-} __packed;
-
-struct mcd_volume {
- u_char v0l;
- u_char v0rs;
- u_char v0r;
- u_char v0ls;
-} __packed;
-
-struct mcd_holdtime {
- u_char units_of_ten_seconds;
- /* If this is 0, the default (12) is used */
-} __packed;
-
-struct mcd_read1 {
- bcd_t start_msf[3];
- u_char nsec[3];
-} __packed;
-
-struct mcd_read2 {
- bcd_t start_msf[3];
- bcd_t end_msf[3];
-} __packed;
-
-struct mcd_rawsector {
- u_char sync1[12];
- u_char header[4];
- u_char subheader1[4];
- u_char subheader2[4];
- u_char data[MCDBLK];
- u_char ecc_bits[280];
-} __packed;
-
-#endif /* MCD_H */
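[Editor's note] The mcd.h header just removed encodes disc positions as BCD minute/second/frame triples (bcd_t plus the M_msf/S_msf/F_msf accessors). For readers unfamiliar with the format, a small standalone sketch of the decoding, with a hypothetical bcd2bin() helper; the sample MSF value is made up:

#include <stdio.h>

typedef unsigned char bcd_t;

/* Hypothetical helper: convert one BCD byte (e.g. 0x59) to binary (59). */
static unsigned
bcd2bin(bcd_t b)
{
	return ((b >> 4) * 10 + (b & 0x0f));
}

int
main(void)
{
	/* MSF triple as a drive might report it: 61 min, 44 s, 17 frames. */
	bcd_t msf[3] = { 0x61, 0x44, 0x17 };

	/* CD-DA layout: 75 frames per second, 60 seconds per minute. */
	unsigned frames = (bcd2bin(msf[0]) * 60 + bcd2bin(msf[1])) * 75 +
	    bcd2bin(msf[2]);

	printf("M:S:F %u:%u:%u -> %u absolute frames\n",
	    bcd2bin(msf[0]), bcd2bin(msf[1]), bcd2bin(msf[2]), frames);
	return (0);
}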
diff --git a/sys/dev/mcd/mcdvar.h b/sys/dev/mcd/mcdvar.h
deleted file mode 100644
index 71e45d0b0bd2..000000000000
--- a/sys/dev/mcd/mcdvar.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * $FreeBSD$
- */
-
-struct mcd_mbx {
- short retry;
- short nblk;
- int sz;
- u_long skip;
- struct bio * bp;
- short count;
- short mode;
-};
-
-struct mcd_data {
- short type;
- char * name;
- short config;
- short flags;
- u_char read_command;
- short status;
- int blksize;
- u_long disksize;
- int partflags;
- int openflags;
- struct mcd_volinfo volinfo;
- struct mcd_qchninfo toc[MCD_MAXTOCS];
- short audio_status;
- short curr_mode;
- struct mcd_read2 lastpb;
- short debug;
- struct bio_queue_head head; /* head of bio queue */
- struct mcd_mbx mbx;
-};
-
-struct mcd_softc {
- device_t dev;
- struct cdev *mcd_dev_t;
- int debug;
-
- struct resource * port;
- int port_rid;
- int port_type;
-
- struct resource * irq;
- int irq_rid;
- int irq_type;
- void * irq_ih;
-
- struct resource * drq;
- int drq_rid;
- int drq_type;
-
- struct mtx mtx;
-
- struct callout timer;
- int ch_state;
- struct mcd_mbx * ch_mbxsave;
-
- struct mcd_data data;
-};
-
-#define MCD_LOCK(_sc) mtx_lock(&_sc->mtx)
-#define MCD_UNLOCK(_sc) mtx_unlock(&_sc->mtx)
-#define MCD_ASSERT_LOCKED(_sc) mtx_assert(&_sc->mtx, MA_OWNED)
-
-#define MCD_READ(_sc, _reg) bus_read_1(_sc->port, _reg)
-#define MCD_WRITE(_sc, _reg, _val) bus_write_1(_sc->port, _reg, _val)
-
-int mcd_probe (struct mcd_softc *);
-int mcd_attach (struct mcd_softc *);
diff --git a/sys/dev/mlx5/mlx5_en/en.h b/sys/dev/mlx5/mlx5_en/en.h
index e894a3fa6251..0ed0a0e12380 100644
--- a/sys/dev/mlx5/mlx5_en/en.h
+++ b/sys/dev/mlx5/mlx5_en/en.h
@@ -59,10 +59,6 @@
#include <machine/bus.h>
-#ifdef HAVE_TURBO_LRO
-#include "tcp_tlro.h"
-#endif
-
#include <dev/mlx5/driver.h>
#include <dev/mlx5/qp.h>
#include <dev/mlx5/cq.h>
@@ -460,11 +456,7 @@ struct mlx5e_rq {
struct ifnet *ifp;
struct mlx5e_rq_stats stats;
struct mlx5e_cq cq;
-#ifdef HAVE_TURBO_LRO
- struct tlro_ctrl lro;
-#else
struct lro_ctrl lro;
-#endif
volatile int enabled;
int ix;
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
index 4a8029bd95d9..2136567d2e15 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_main.c
@@ -666,10 +666,15 @@ mlx5e_create_rq(struct mlx5e_channel *c,
}
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
+
+ err = -tcp_lro_init_args(&rq->lro, c->ifp, TCP_LRO_ENTRIES, wq_sz);
+ if (err)
+ goto err_rq_wq_destroy;
+
rq->mbuf = malloc(wq_sz * sizeof(rq->mbuf[0]), M_MLX5EN, M_WAITOK | M_ZERO);
if (rq->mbuf == NULL) {
err = -ENOMEM;
- goto err_rq_wq_destroy;
+ goto err_lro_init;
}
for (i = 0; i != wq_sz; i++) {
struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
@@ -694,20 +699,12 @@ mlx5e_create_rq(struct mlx5e_channel *c,
mlx5e_create_stats(&rq->stats.ctx, SYSCTL_CHILDREN(priv->sysctl_ifnet),
buffer, mlx5e_rq_stats_desc, MLX5E_RQ_STATS_NUM,
rq->stats.arg);
-
-#ifdef HAVE_TURBO_LRO
- if (tcp_tlro_init(&rq->lro, c->ifp, MLX5E_BUDGET_MAX) != 0)
- rq->lro.mbuf = NULL;
-#else
- if (tcp_lro_init(&rq->lro))
- rq->lro.lro_cnt = 0;
- else
- rq->lro.ifp = c->ifp;
-#endif
return (0);
err_rq_mbuf_free:
free(rq->mbuf, M_MLX5EN);
+err_lro_init:
+ tcp_lro_free(&rq->lro);
err_rq_wq_destroy:
mlx5_wq_destroy(&rq->wq_ctrl);
err_free_dma_tag:
@@ -726,11 +723,8 @@ mlx5e_destroy_rq(struct mlx5e_rq *rq)
sysctl_ctx_free(&rq->stats.ctx);
/* free leftover LRO packets, if any */
-#ifdef HAVE_TURBO_LRO
- tcp_tlro_free(&rq->lro);
-#else
tcp_lro_free(&rq->lro);
-#endif
+
wq_sz = mlx5_wq_ll_get_size(&rq->wq);
for (i = 0; i != wq_sz; i++) {
if (rq->mbuf[i].mbuf != NULL) {
@@ -3114,6 +3108,13 @@ mlx5e_destroy_ifp(struct mlx5_core_dev *mdev, void *vpriv)
/* don't allow more IOCTLs */
priv->gone = 1;
+ /*
+ * Clear the device description to avoid use after free,
+ * because the bsddev is not destroyed when this module is
+ * unloaded:
+ */
+ device_set_desc(mdev->pdev->dev.bsddev, NULL);
+
/* XXX wait a bit to allow IOCTL handlers to complete */
pause("W", hz);
diff --git a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
index 6bc2c17370a6..5c793473e63c 100644
--- a/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
+++ b/sys/dev/mlx5/mlx5_en/mlx5_en_rx.c
@@ -369,15 +369,9 @@ mlx5e_poll_rx_cq(struct mlx5e_rq *rq, int budget)
mlx5e_build_rx_mbuf(cqe, rq, mb, byte_cnt);
rq->stats.packets++;
-#ifdef HAVE_TURBO_LRO
- if (mb->m_pkthdr.csum_flags == 0 ||
- (rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
- rq->lro.mbuf == NULL) {
- /* normal input */
- rq->ifp->if_input(rq->ifp, mb);
- } else {
- tcp_tlro_rx(&rq->lro, mb);
- }
+
+#if !defined(HAVE_TCP_LRO_RX)
+ tcp_lro_queue_mbuf(&rq->lro, mb);
#else
if (mb->m_pkthdr.csum_flags == 0 ||
(rq->ifp->if_capenable & IFCAP_LRO) == 0 ||
@@ -395,9 +389,6 @@ wq_ll_pop:
/* ensure cq space is freed before enabling more cqes */
wmb();
-#ifndef HAVE_TURBO_LRO
- tcp_lro_flush_all(&rq->lro);
-#endif
return (i);
}
@@ -437,8 +428,6 @@ mlx5e_rx_cq_comp(struct mlx5_core_cq *mcq)
}
mlx5e_post_rx_wqes(rq);
mlx5e_cq_arm(&rq->cq);
-#ifdef HAVE_TURBO_LRO
- tcp_tlro_flush(&rq->lro, 1);
-#endif
+ tcp_lro_flush_all(&rq->lro);
mtx_unlock(&rq->mtx);
}
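[Editor's note] The three mlx5_en hunks above replace the driver-private "turbo LRO" with the stock tcp_lro(4) engine: tcp_lro_init_args() at ring setup, tcp_lro_queue_mbuf() per received packet, and tcp_lro_flush_all() at the end of each completion pass. A minimal sketch of that pattern follows; the ring structure and function names are illustrative, not mlx5_en's:

#include <sys/param.h>
#include <sys/mbuf.h>
#include <net/if.h>
#include <net/if_var.h>
#include <netinet/in.h>
#include <netinet/tcp_lro.h>

struct rx_ring {
	struct lro_ctrl	lro;	/* per-ring LRO state, like mlx5e_rq */
	struct ifnet	*ifp;
};

static int
rx_ring_lro_init(struct rx_ring *r, struct ifnet *ifp, unsigned nmbufs)
{
	/*
	 * Returns 0 or a positive errno; mlx5_en negates the result
	 * because the surrounding code uses Linux-style negative errors.
	 */
	return (tcp_lro_init_args(&r->lro, ifp, TCP_LRO_ENTRIES, nmbufs));
}

static void
rx_ring_input(struct rx_ring *r, struct mbuf *mb)
{
	/* Defer to the sorting LRO engine; assumes a valid flow hash. */
	tcp_lro_queue_mbuf(&r->lro, mb);
}

static void
rx_ring_cq_done(struct rx_ring *r)
{
	/* Merge and push everything up the stack, as in mlx5e_rx_cq_comp(). */
	tcp_lro_flush_all(&r->lro);
}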
diff --git a/sys/dev/mlx5/mlx5_en/tcp_tlro.c b/sys/dev/mlx5/mlx5_en/tcp_tlro.c
deleted file mode 100644
index 4b8190455636..000000000000
--- a/sys/dev/mlx5/mlx5_en/tcp_tlro.c
+++ /dev/null
@@ -1,697 +0,0 @@
-/*-
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#include <sys/cdefs.h>
-__FBSDID("$FreeBSD$");
-
-#include "opt_inet.h"
-#include "opt_inet6.h"
-
-#include <sys/param.h>
-#include <sys/libkern.h>
-#include <sys/mbuf.h>
-#include <sys/lock.h>
-#include <sys/mutex.h>
-#include <sys/sysctl.h>
-#include <sys/malloc.h>
-#include <sys/kernel.h>
-#include <sys/endian.h>
-#include <sys/socket.h>
-#include <sys/sockopt.h>
-#include <sys/smp.h>
-
-#include <net/if.h>
-#include <net/if_var.h>
-#include <net/ethernet.h>
-
-#if defined(INET) || defined(INET6)
-#include <netinet/in.h>
-#endif
-
-#ifdef INET
-#include <netinet/ip.h>
-#endif
-
-#ifdef INET6
-#include <netinet/ip6.h>
-#endif
-
-#include <netinet/tcp_var.h>
-
-#include "tcp_tlro.h"
-
-#ifndef M_HASHTYPE_LRO_TCP
-#ifndef KLD_MODULE
-#warning "M_HASHTYPE_LRO_TCP is not defined"
-#endif
-#define M_HASHTYPE_LRO_TCP 254
-#endif
-
-static SYSCTL_NODE(_net_inet_tcp, OID_AUTO, tlro,
- CTLFLAG_RW, 0, "TCP turbo LRO parameters");
-
-static MALLOC_DEFINE(M_TLRO, "TLRO", "Turbo LRO");
-
-static int tlro_min_rate = 20; /* Hz */
-
-SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, min_rate, CTLFLAG_RWTUN,
- &tlro_min_rate, 0, "Minimum serving rate in Hz");
-
-static int tlro_max_packet = IP_MAXPACKET;
-
-SYSCTL_INT(_net_inet_tcp_tlro, OID_AUTO, max_packet, CTLFLAG_RWTUN,
- &tlro_max_packet, 0, "Maximum packet size in bytes");
-
-typedef struct {
- uint32_t value;
-} __packed uint32_p_t;
-
-static uint16_t
-tcp_tlro_csum(const uint32_p_t *p, size_t l)
-{
- const uint32_p_t *pend = p + (l / 4);
- uint64_t cs;
-
- for (cs = 0; p != pend; p++)
- cs += le32toh(p->value);
- while (cs > 0xffff)
- cs = (cs >> 16) + (cs & 0xffff);
- return (cs);
-}
-
-static void *
-tcp_tlro_get_header(const struct mbuf *m, const u_int off,
- const u_int len)
-{
- if (m->m_len < (off + len))
- return (NULL);
- return (mtod(m, char *) + off);
-}
-
-static uint8_t
-tcp_tlro_info_save_timestamp(struct tlro_mbuf_data *pinfo)
-{
- struct tcphdr *tcp = pinfo->tcp;
- uint32_t *ts_ptr;
-
- if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
- return (0);
-
- ts_ptr = (uint32_t *)(tcp + 1);
- if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
- return (0);
-
- /* Save timestamps */
- pinfo->tcp_ts = ts_ptr[1];
- pinfo->tcp_ts_reply = ts_ptr[2];
- return (1);
-}
-
-static void
-tcp_tlro_info_restore_timestamp(struct tlro_mbuf_data *pinfoa,
- struct tlro_mbuf_data *pinfob)
-{
- struct tcphdr *tcp = pinfoa->tcp;
- uint32_t *ts_ptr;
-
- if (tcp->th_off < ((TCPOLEN_TSTAMP_APPA + sizeof(*tcp)) >> 2))
- return;
-
- ts_ptr = (uint32_t *)(tcp + 1);
- if (*ts_ptr != ntohl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
- (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP))
- return;
-
- /* Restore timestamps */
- ts_ptr[1] = pinfob->tcp_ts;
- ts_ptr[2] = pinfob->tcp_ts_reply;
-}
-
-static void
-tcp_tlro_extract_header(struct tlro_mbuf_data *pinfo, struct mbuf *m, int seq)
-{
- uint8_t *phdr = (uint8_t *)pinfo->buf;
- struct ether_header *eh;
- struct ether_vlan_header *vlan;
-#ifdef INET
- struct ip *ip;
-#endif
-#ifdef INET6
- struct ip6_hdr *ip6;
-#endif
- struct tcphdr *tcp;
- uint16_t etype;
- int diff;
- int off;
-
- /* Fill in information */
- pinfo->head = m;
- pinfo->last_tick = ticks;
- pinfo->sequence = seq;
- pinfo->pprev = &m_last(m)->m_next;
-
- off = sizeof(*eh);
- if (m->m_len < off)
- goto error;
- eh = tcp_tlro_get_header(m, 0, sizeof(*eh));
- if (eh == NULL)
- goto error;
- memcpy(phdr, &eh->ether_dhost, ETHER_ADDR_LEN);
- phdr += ETHER_ADDR_LEN;
- memcpy(phdr, &eh->ether_type, sizeof(eh->ether_type));
- phdr += sizeof(eh->ether_type);
- etype = ntohs(eh->ether_type);
-
- if (etype == ETHERTYPE_VLAN) {
- vlan = tcp_tlro_get_header(m, off, sizeof(*vlan));
- if (vlan == NULL)
- goto error;
- memcpy(phdr, &vlan->evl_tag, sizeof(vlan->evl_tag) +
- sizeof(vlan->evl_proto));
- phdr += sizeof(vlan->evl_tag) + sizeof(vlan->evl_proto);
- etype = ntohs(vlan->evl_proto);
- off += sizeof(*vlan) - sizeof(*eh);
- }
- switch (etype) {
-#ifdef INET
- case ETHERTYPE_IP:
- /*
- * Cannot LRO:
- * - Non-IP packets
- * - Fragmented packets
- * - Packets with IPv4 options
- * - Non-TCP packets
- */
- ip = tcp_tlro_get_header(m, off, sizeof(*ip));
- if (ip == NULL ||
- (ip->ip_off & htons(IP_MF | IP_OFFMASK)) != 0 ||
- (ip->ip_p != IPPROTO_TCP) ||
- (ip->ip_hl << 2) != sizeof(*ip))
- goto error;
-
- /* Legacy IP has a header checksum that needs to be correct */
- if (!(m->m_pkthdr.csum_flags & CSUM_IP_CHECKED)) {
- /* Verify IP header */
- if (tcp_tlro_csum((uint32_p_t *)ip, sizeof(*ip)) != 0xFFFF)
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED;
- else
- m->m_pkthdr.csum_flags |= CSUM_IP_CHECKED |
- CSUM_IP_VALID;
- }
- /* Only accept valid checksums */
- if (!(m->m_pkthdr.csum_flags & CSUM_IP_VALID) ||
- !(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
- goto error;
- memcpy(phdr, &ip->ip_src, sizeof(ip->ip_src) +
- sizeof(ip->ip_dst));
- phdr += sizeof(ip->ip_src) + sizeof(ip->ip_dst);
- if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
- pinfo->ip_len = m->m_pkthdr.len - off;
- else
- pinfo->ip_len = ntohs(ip->ip_len);
- pinfo->ip_hdrlen = sizeof(*ip);
- pinfo->ip.v4 = ip;
- pinfo->ip_version = 4;
- off += sizeof(*ip);
- break;
-#endif
-#ifdef INET6
- case ETHERTYPE_IPV6:
- /*
- * Cannot LRO:
- * - Non-IP packets
- * - Packets with IPv6 options
- * - Non-TCP packets
- */
- ip6 = tcp_tlro_get_header(m, off, sizeof(*ip6));
- if (ip6 == NULL || ip6->ip6_nxt != IPPROTO_TCP)
- goto error;
- if (!(m->m_pkthdr.csum_flags & CSUM_DATA_VALID))
- goto error;
- memcpy(phdr, &ip6->ip6_src, sizeof(struct in6_addr) +
- sizeof(struct in6_addr));
- phdr += sizeof(struct in6_addr) + sizeof(struct in6_addr);
- if (M_HASHTYPE_GET(m) == M_HASHTYPE_LRO_TCP)
- pinfo->ip_len = m->m_pkthdr.len - off;
- else
- pinfo->ip_len = ntohs(ip6->ip6_plen) + sizeof(*ip6);
- pinfo->ip_hdrlen = sizeof(*ip6);
- pinfo->ip.v6 = ip6;
- pinfo->ip_version = 6;
- off += sizeof(*ip6);
- break;
-#endif
- default:
- goto error;
- }
- tcp = tcp_tlro_get_header(m, off, sizeof(*tcp));
- if (tcp == NULL)
- goto error;
- memcpy(phdr, &tcp->th_sport, sizeof(tcp->th_sport) +
- sizeof(tcp->th_dport));
- phdr += sizeof(tcp->th_sport) +
- sizeof(tcp->th_dport);
- /* Store TCP header length */
- *phdr++ = tcp->th_off;
- if (tcp->th_off < (sizeof(*tcp) >> 2))
- goto error;
-
- /* Compute offset to data payload */
- pinfo->tcp_len = (tcp->th_off << 2);
- off += pinfo->tcp_len;
-
- /* Store more info */
- pinfo->data_off = off;
- pinfo->tcp = tcp;
-
- /* Try to save timestamp, if any */
- *phdr++ = tcp_tlro_info_save_timestamp(pinfo);
-
- /* Verify offset and IP/TCP length */
- if (off > m->m_pkthdr.len ||
- pinfo->ip_len < pinfo->tcp_len)
- goto error;
-
- /* Compute data payload length */
- pinfo->data_len = (pinfo->ip_len - pinfo->tcp_len - pinfo->ip_hdrlen);
-
- /* Trim any padded data */
- diff = (m->m_pkthdr.len - off) - pinfo->data_len;
- if (diff != 0) {
- if (diff < 0)
- goto error;
- else
- m_adj(m, -diff);
- }
- /* Compute header length */
- pinfo->buf_length = phdr - (uint8_t *)pinfo->buf;
- /* Zero-pad rest of buffer */
- memset(phdr, 0, TLRO_MAX_HEADER - pinfo->buf_length);
- return;
-error:
- pinfo->buf_length = 0;
-}
-
-static int
-tcp_tlro_cmp64(const uint64_t *pa, const uint64_t *pb)
-{
- int64_t diff = 0;
- unsigned x;
-
- for (x = 0; x != TLRO_MAX_HEADER / 8; x++) {
- /*
- * NOTE: Endianness does not matter in this
-		 * comparison:
- */
- diff = pa[x] - pb[x];
- if (diff != 0)
- goto done;
- }
-done:
- if (diff < 0)
- return (-1);
- else if (diff > 0)
- return (1);
- return (0);
-}
-
-static int
-tcp_tlro_compare_header(const void *_ppa, const void *_ppb)
-{
- const struct tlro_mbuf_ptr *ppa = _ppa;
- const struct tlro_mbuf_ptr *ppb = _ppb;
- struct tlro_mbuf_data *pinfoa = ppa->data;
- struct tlro_mbuf_data *pinfob = ppb->data;
- int ret;
-
- ret = (pinfoa->head == NULL) - (pinfob->head == NULL);
- if (ret != 0)
- goto done;
-
- ret = pinfoa->buf_length - pinfob->buf_length;
- if (ret != 0)
- goto done;
- if (pinfoa->buf_length != 0) {
- ret = tcp_tlro_cmp64(pinfoa->buf, pinfob->buf);
- if (ret != 0)
- goto done;
- ret = ntohl(pinfoa->tcp->th_seq) - ntohl(pinfob->tcp->th_seq);
- if (ret != 0)
- goto done;
- ret = ntohl(pinfoa->tcp->th_ack) - ntohl(pinfob->tcp->th_ack);
- if (ret != 0)
- goto done;
- ret = pinfoa->sequence - pinfob->sequence;
- if (ret != 0)
- goto done;
- }
-done:
- return (ret);
-}
-
-static void
-tcp_tlro_sort(struct tlro_ctrl *tlro)
-{
- if (tlro->curr == 0)
- return;
-
- qsort(tlro->mbuf, tlro->curr, sizeof(struct tlro_mbuf_ptr),
- &tcp_tlro_compare_header);
-}
-
-static int
-tcp_tlro_get_ticks(void)
-{
- int to = tlro_min_rate;
-
- if (to < 1)
- to = 1;
- to = hz / to;
- if (to < 1)
- to = 1;
- return (to);
-}
-
-static void
-tcp_tlro_combine(struct tlro_ctrl *tlro, int force)
-{
- struct tlro_mbuf_data *pinfoa;
- struct tlro_mbuf_data *pinfob;
- uint32_t cs;
- int curr_ticks = ticks;
- int ticks_limit = tcp_tlro_get_ticks();
- unsigned x;
- unsigned y;
- unsigned z;
- int temp;
-
- if (tlro->curr == 0)
- return;
-
- for (y = 0; y != tlro->curr;) {
- struct mbuf *m;
-
- pinfoa = tlro->mbuf[y].data;
- for (x = y + 1; x != tlro->curr; x++) {
- pinfob = tlro->mbuf[x].data;
- if (pinfoa->buf_length != pinfob->buf_length ||
- tcp_tlro_cmp64(pinfoa->buf, pinfob->buf) != 0)
- break;
- }
- if (pinfoa->buf_length == 0) {
- /* Forward traffic which cannot be combined */
- for (z = y; z != x; z++) {
- /* Just forward packets */
- pinfob = tlro->mbuf[z].data;
-
- m = pinfob->head;
-
- /* Reset info structure */
- pinfob->head = NULL;
- pinfob->buf_length = 0;
-
- /* Do stats */
- tlro->lro_flushed++;
-
- /* Input packet to network layer */
- (*tlro->ifp->if_input) (tlro->ifp, m);
- }
- y = z;
- continue;
- }
-
-		/* Compute the current checksum, with some header parts subtracted */
- temp = (pinfoa->ip_len - pinfoa->ip_hdrlen);
- cs = ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
- tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len);
-
- /* Append all fragments into one block */
- for (z = y + 1; z != x; z++) {
-
- pinfob = tlro->mbuf[z].data;
-
- /* Check for command packets */
- if ((pinfoa->tcp->th_flags & ~(TH_ACK | TH_PUSH)) ||
- (pinfob->tcp->th_flags & ~(TH_ACK | TH_PUSH)))
- break;
-
- /* Check if there is enough space */
- if ((pinfoa->ip_len + pinfob->data_len) > tlro_max_packet)
- break;
-
- /* Try to append the new segment */
- temp = ntohl(pinfoa->tcp->th_seq) + pinfoa->data_len;
- if (temp != (int)ntohl(pinfob->tcp->th_seq))
- break;
-
- temp = pinfob->ip_len - pinfob->ip_hdrlen;
- cs += ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8) +
- tcp_tlro_csum((uint32_p_t *)pinfob->tcp, pinfob->tcp_len);
- /* Remove fields which appear twice */
- cs += (IPPROTO_TCP << 8);
- if (pinfob->ip_version == 4) {
- cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_src, 4);
- cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v4->ip_dst, 4);
- } else {
- cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_src, 16);
- cs += tcp_tlro_csum((uint32_p_t *)&pinfob->ip.v6->ip6_dst, 16);
- }
- /* Remainder computation */
- while (cs > 0xffff)
- cs = (cs >> 16) + (cs & 0xffff);
-
- /* Update window and ack sequence number */
- pinfoa->tcp->th_ack = pinfob->tcp->th_ack;
- pinfoa->tcp->th_win = pinfob->tcp->th_win;
-
- /* Check if we should restore the timestamp */
- tcp_tlro_info_restore_timestamp(pinfoa, pinfob);
-
- /* Accumulate TCP flags */
- pinfoa->tcp->th_flags |= pinfob->tcp->th_flags;
-
- /* update lengths */
- pinfoa->ip_len += pinfob->data_len;
- pinfoa->data_len += pinfob->data_len;
-
- /* Clear mbuf pointer - packet is accumulated */
- m = pinfob->head;
-
- /* Reset info structure */
- pinfob->head = NULL;
- pinfob->buf_length = 0;
-
- /* Append data to mbuf [y] */
- m_adj(m, pinfob->data_off);
- /* Delete mbuf tags, if any */
- m_tag_delete_chain(m, NULL);
- /* Clear packet header flag */
- m->m_flags &= ~M_PKTHDR;
-
- /* Concat mbuf(s) to end of list */
- pinfoa->pprev[0] = m;
- m = m_last(m);
- pinfoa->pprev = &m->m_next;
- pinfoa->head->m_pkthdr.len += pinfob->data_len;
- }
- /* Compute new TCP header checksum */
- pinfoa->tcp->th_sum = 0;
-
- temp = pinfoa->ip_len - pinfoa->ip_hdrlen;
- cs = (cs ^ 0xFFFF) +
- tcp_tlro_csum((uint32_p_t *)pinfoa->tcp, pinfoa->tcp_len) +
- ((temp & 0xFF) << 8) + ((temp & 0xFF00) >> 8);
-
- /* Remainder computation */
- while (cs > 0xffff)
- cs = (cs >> 16) + (cs & 0xffff);
-
- /* Update new checksum */
- pinfoa->tcp->th_sum = ~htole16(cs);
-
- /* Update IP length, if any */
- if (pinfoa->ip_version == 4) {
- if (pinfoa->ip_len > IP_MAXPACKET) {
- M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
- pinfoa->ip.v4->ip_len = htons(IP_MAXPACKET);
- } else {
- pinfoa->ip.v4->ip_len = htons(pinfoa->ip_len);
- }
- } else {
- if (pinfoa->ip_len > (IP_MAXPACKET + sizeof(*pinfoa->ip.v6))) {
- M_HASHTYPE_SET(pinfoa->head, M_HASHTYPE_LRO_TCP);
- pinfoa->ip.v6->ip6_plen = htons(IP_MAXPACKET);
- } else {
- temp = pinfoa->ip_len - sizeof(*pinfoa->ip.v6);
- pinfoa->ip.v6->ip6_plen = htons(temp);
- }
- }
-
- temp = curr_ticks - pinfoa->last_tick;
- /* Check if packet should be forwarded */
- if (force != 0 || z != x || temp >= ticks_limit ||
- pinfoa->data_len == 0) {
-
- /* Compute new IPv4 header checksum */
- if (pinfoa->ip_version == 4) {
- pinfoa->ip.v4->ip_sum = 0;
- cs = tcp_tlro_csum((uint32_p_t *)pinfoa->ip.v4,
- sizeof(*pinfoa->ip.v4));
- pinfoa->ip.v4->ip_sum = ~htole16(cs);
- }
- /* Forward packet */
- m = pinfoa->head;
-
- /* Reset info structure */
- pinfoa->head = NULL;
- pinfoa->buf_length = 0;
-
- /* Do stats */
- tlro->lro_flushed++;
-
- /* Input packet to network layer */
- (*tlro->ifp->if_input) (tlro->ifp, m);
- }
- y = z;
- }
-
- /* Cleanup all NULL heads */
- for (y = 0; y != tlro->curr; y++) {
- if (tlro->mbuf[y].data->head == NULL) {
- for (z = y + 1; z != tlro->curr; z++) {
- struct tlro_mbuf_ptr ptemp;
- if (tlro->mbuf[z].data->head == NULL)
- continue;
- ptemp = tlro->mbuf[y];
- tlro->mbuf[y] = tlro->mbuf[z];
- tlro->mbuf[z] = ptemp;
- y++;
- }
- break;
- }
- }
- tlro->curr = y;
-}
-
-static void
-tcp_tlro_cleanup(struct tlro_ctrl *tlro)
-{
- while (tlro->curr != 0 &&
- tlro->mbuf[tlro->curr - 1].data->head == NULL)
- tlro->curr--;
-}
-
-void
-tcp_tlro_flush(struct tlro_ctrl *tlro, int force)
-{
- if (tlro->curr == 0)
- return;
-
- tcp_tlro_sort(tlro);
- tcp_tlro_cleanup(tlro);
- tcp_tlro_combine(tlro, force);
-}
-
-int
-tcp_tlro_init(struct tlro_ctrl *tlro, struct ifnet *ifp,
- int max_mbufs)
-{
- ssize_t size;
- uint32_t x;
-
- /* Set zero defaults */
- memset(tlro, 0, sizeof(*tlro));
-
- /* Compute size needed for data */
- size = (sizeof(struct tlro_mbuf_ptr) * max_mbufs) +
- (sizeof(struct tlro_mbuf_data) * max_mbufs);
-
- /* Range check */
- if (max_mbufs <= 0 || size <= 0 || ifp == NULL)
- return (EINVAL);
-
- /* Setup tlro control structure */
- tlro->mbuf = malloc(size, M_TLRO, M_WAITOK | M_ZERO);
- tlro->max = max_mbufs;
- tlro->ifp = ifp;
-
- /* Setup pointer array */
- for (x = 0; x != tlro->max; x++) {
- tlro->mbuf[x].data = ((struct tlro_mbuf_data *)
- &tlro->mbuf[max_mbufs]) + x;
- }
- return (0);
-}
-
-void
-tcp_tlro_free(struct tlro_ctrl *tlro)
-{
- struct tlro_mbuf_data *pinfo;
- struct mbuf *m;
- uint32_t y;
-
- /* Check if not setup */
- if (tlro->mbuf == NULL)
- return;
- /* Free MBUF array and any leftover MBUFs */
- for (y = 0; y != tlro->max; y++) {
-
- pinfo = tlro->mbuf[y].data;
-
- m = pinfo->head;
-
- /* Reset info structure */
- pinfo->head = NULL;
- pinfo->buf_length = 0;
-
- m_freem(m);
- }
- free(tlro->mbuf, M_TLRO);
- /* Reset buffer */
- memset(tlro, 0, sizeof(*tlro));
-}
-
-void
-tcp_tlro_rx(struct tlro_ctrl *tlro, struct mbuf *m)
-{
- if (m->m_len > 0 && tlro->curr < tlro->max) {
- /* do stats */
- tlro->lro_queued++;
-
- /* extract header */
- tcp_tlro_extract_header(tlro->mbuf[tlro->curr++].data,
- m, tlro->sequence++);
- } else if (tlro->ifp != NULL) {
- /* do stats */
- tlro->lro_flushed++;
-
- /* input packet to network layer */
- (*tlro->ifp->if_input) (tlro->ifp, m);
- } else {
- /* packet drop */
- m_freem(m);
- }
-}
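[Editor's note] The deleted tcp_tlro.c above does its checksum arithmetic in one's-complement form: tcp_tlro_csum() sums 32-bit words into a 64-bit accumulator, then folds the carries back into the low 16 bits. A tiny standalone illustration of that folding step (not driver code):

#include <stdint.h>
#include <stdio.h>

/* Fold a wide one's-complement accumulator down to 16 bits. */
static uint16_t
csum_fold(uint64_t cs)
{
	while (cs > 0xffff)
		cs = (cs >> 16) + (cs & 0xffff);
	return ((uint16_t)cs);
}

int
main(void)
{
	/* 0x1ffff -> 0x0001 + 0xffff = 0x10000 -> 0x0001. */
	printf("0x%04x\n", csum_fold(0x1ffffULL));
	return (0);
}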
diff --git a/sys/dev/mlx5/mlx5_en/tcp_tlro.h b/sys/dev/mlx5/mlx5_en/tcp_tlro.h
deleted file mode 100644
index 40ebdf681902..000000000000
--- a/sys/dev/mlx5/mlx5_en/tcp_tlro.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*-
- * Copyright (c) 2015 Mellanox Technologies. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS `AS IS' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * $FreeBSD$
- */
-
-#ifndef _TCP_TLRO_H_
-#define _TCP_TLRO_H_
-
-#define TLRO_MAX_HEADER 64 /* bytes */
-
-struct ip;
-struct ip6_hdr;
-struct tcphdr;
-
-struct tlro_mbuf_data {
- union {
-#ifdef INET
- struct ip *v4;
-#endif
-#ifdef INET6
- struct ip6_hdr *v6;
-#endif
- } ip;
- struct tcphdr *tcp;
- struct mbuf *head;
- struct mbuf **pprev;
- int last_tick;
- int sequence;
- int data_len;
- int data_off;
- int ip_hdrlen;
- int ip_len;
- uint32_t tcp_ts;
- uint32_t tcp_ts_reply;
- uint16_t tcp_len;
- uint8_t ip_version;
- uint8_t buf_length; /* in 32-bit words */
- uint64_t buf[TLRO_MAX_HEADER / 8];
-} __aligned(256);
-
-struct tlro_mbuf_ptr {
- struct tlro_mbuf_data *data;
-};
-
-/* NB: This is part of driver structs */
-struct tlro_ctrl {
- struct ifnet *ifp;
- struct tlro_mbuf_ptr *mbuf;
- uint64_t lro_queued;
- uint64_t lro_flushed;
- uint32_t max;
- uint32_t curr;
- int sequence;
-};
-
-int tcp_tlro_init(struct tlro_ctrl *, struct ifnet *, int);
-void tcp_tlro_free(struct tlro_ctrl *);
-void tcp_tlro_flush(struct tlro_ctrl *, int);
-void tcp_tlro_rx(struct tlro_ctrl *, struct mbuf *);
-
-#endif /* _TCP_TLRO_H_ */
diff --git a/sys/dev/mpt/mpt_pci.c b/sys/dev/mpt/mpt_pci.c
index b014874e5553..7caf6d4f568b 100644
--- a/sys/dev/mpt/mpt_pci.c
+++ b/sys/dev/mpt/mpt_pci.c
@@ -654,10 +654,6 @@ mpt_dma_mem_alloc(struct mpt_softc *mpt)
len = sizeof (request_t) * MPT_MAX_REQUESTS(mpt);
mpt->request_pool = (request_t *)malloc(len, M_DEVBUF, M_WAITOK|M_ZERO);
- if (mpt->request_pool == NULL) {
- mpt_prt(mpt, "cannot allocate request pool\n");
- return (1);
- }
/*
* Create a parent dma tag for this device.
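[Editor's note] This mpt hunk, and the msk, nand, and nandsim hunks that follow, all make the same cleanup: malloc(9) called with M_WAITOK sleeps until memory is available and never returns NULL, so the removed NULL checks were dead code. A minimal sketch of the idiom, with a hypothetical malloc type and struct:

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/systm.h>

static MALLOC_DEFINE(M_EXAMPLE, "example", "example allocations");

struct foo {
	int	x;
};

static struct foo *
foo_alloc(void)
{
	/*
	 * Guaranteed non-NULL on return, so no error path is needed;
	 * M_WAITOK may only be used from contexts that are allowed to sleep.
	 */
	return (malloc(sizeof(struct foo), M_EXAMPLE, M_WAITOK | M_ZERO));
}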
diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c
index 458d7fdeb43c..cc1351db0c46 100644
--- a/sys/dev/msk/if_msk.c
+++ b/sys/dev/msk/if_msk.c
@@ -1953,12 +1953,6 @@ mskc_attach(device_t dev)
goto fail;
}
mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK | M_ZERO);
- if (mmd == NULL) {
- device_printf(dev, "failed to allocate memory for "
- "ivars of PORT_A\n");
- error = ENXIO;
- goto fail;
- }
mmd->port = MSK_PORT_A;
mmd->pmd = sc->msk_pmd;
mmd->mii_flags |= MIIF_DOPAUSE;
@@ -1977,12 +1971,6 @@ mskc_attach(device_t dev)
}
mmd = malloc(sizeof(struct msk_mii_data), M_DEVBUF, M_WAITOK |
M_ZERO);
- if (mmd == NULL) {
- device_printf(dev, "failed to allocate memory for "
- "ivars of PORT_B\n");
- error = ENXIO;
- goto fail;
- }
mmd->port = MSK_PORT_B;
mmd->pmd = sc->msk_pmd;
if (sc->msk_pmd == 'L' || sc->msk_pmd == 'S')
diff --git a/sys/dev/nand/nand_generic.c b/sys/dev/nand/nand_generic.c
index 59a6bf2001c1..272b9590b1f8 100644
--- a/sys/dev/nand/nand_generic.c
+++ b/sys/dev/nand/nand_generic.c
@@ -228,8 +228,6 @@ generic_nand_attach(device_t dev)
if (ivar->is_onfi) {
onfi_chip_params = malloc(sizeof(struct onfi_chip_params),
M_NAND, M_WAITOK | M_ZERO);
- if (onfi_chip_params == NULL)
- return (ENOMEM);
if (onfi_read_parameter(chip, onfi_chip_params)) {
nand_debug(NDBG_GEN,"Could not read parameter page!\n");
@@ -741,10 +739,6 @@ onfi_is_blk_bad(device_t device, uint32_t block_number, uint8_t *bad)
chip = device_get_softc(device);
oob = malloc(chip->chip_geom.oob_size, M_NAND, M_WAITOK);
- if (!oob) {
- device_printf(device, "%s: cannot allocate oob\n", __func__);
- return (ENOMEM);
- }
page_number = block_number * chip->chip_geom.pgs_per_blk;
*bad = 0;
@@ -1001,10 +995,6 @@ generic_is_blk_bad(device_t dev, uint32_t block, uint8_t *bad)
chip = device_get_softc(dev);
oob = malloc(chip->chip_geom.oob_size, M_NAND, M_WAITOK);
- if (!oob) {
- device_printf(dev, "%s: cannot allocate OOB\n", __func__);
- return (ENOMEM);
- }
page_number = block * chip->chip_geom.pgs_per_blk;
*bad = 0;
diff --git a/sys/dev/nand/nandsim_chip.c b/sys/dev/nand/nandsim_chip.c
index 5b568abf58b2..70ccbc9a32b8 100644
--- a/sys/dev/nand/nandsim_chip.c
+++ b/sys/dev/nand/nandsim_chip.c
@@ -90,8 +90,6 @@ nandsim_chip_init(struct nandsim_softc* sc, uint8_t chip_num,
int error;
chip = malloc(sizeof(*chip), M_NANDSIM, M_WAITOK | M_ZERO);
- if (!chip)
- return (NULL);
mtx_init(&chip->ns_lock, "nandsim lock", NULL, MTX_DEF);
callout_init(&chip->ns_callout, 1);
@@ -206,9 +204,6 @@ nandsim_blk_state_init(struct nandsim_chip *chip, uint32_t size,
chip->blk_state = malloc(size * sizeof(struct nandsim_block_state),
M_NANDSIM, M_WAITOK | M_ZERO);
- if (!chip->blk_state) {
- return (-1);
- }
for (i = 0; i < size; i++) {
if (wear_lev)
diff --git a/sys/dev/netmap/netmap_mem2.c b/sys/dev/netmap/netmap_mem2.c
index 023604d49535..fd0c06bb8b57 100644
--- a/sys/dev/netmap/netmap_mem2.c
+++ b/sys/dev/netmap/netmap_mem2.c
@@ -201,7 +201,7 @@ NMD_DEFNACB(void, rings_delete);
static int netmap_mem_map(struct netmap_obj_pool *, struct netmap_adapter *);
static int netmap_mem_unmap(struct netmap_obj_pool *, struct netmap_adapter *);
-static int nm_mem_assign_group(struct netmap_mem_d *, struct device *);
+static int nm_mem_assign_group(struct netmap_mem_d *, device_t);
#define NMA_LOCK_INIT(n) NM_MTX_INIT((n)->nm_mtx)
#define NMA_LOCK_DESTROY(n) NM_MTX_DESTROY((n)->nm_mtx)
@@ -456,7 +456,7 @@ nm_mem_release_id(struct netmap_mem_d *nmd)
}
static int
-nm_mem_assign_group(struct netmap_mem_d *nmd, struct device *dev)
+nm_mem_assign_group(struct netmap_mem_d *nmd, device_t dev)
{
int err = 0, id;
id = nm_iommu_group_id(dev);
diff --git a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c
index 5ff1b9ae1764..c67ae0dc682f 100644
--- a/sys/dev/ntb/if_ntb/if_ntb.c
+++ b/sys/dev/ntb/if_ntb/if_ntb.c
@@ -76,7 +76,7 @@ __FBSDID("$FreeBSD$");
static SYSCTL_NODE(_hw, OID_AUTO, if_ntb, CTLFLAG_RW, 0, "if_ntb");
-static unsigned g_if_ntb_num_queues = 1;
+static unsigned g_if_ntb_num_queues = UINT_MAX;
SYSCTL_UINT(_hw_if_ntb, OID_AUTO, num_queues, CTLFLAG_RWTUN,
&g_if_ntb_num_queues, 0, "Number of queues per interface");
@@ -144,7 +144,8 @@ ntb_net_attach(device_t dev)
if_initname(ifp, device_get_name(dev), device_get_unit(dev));
if_setdev(ifp, dev);
- sc->num_queues = g_if_ntb_num_queues;
+ sc->num_queues = min(g_if_ntb_num_queues,
+ ntb_transport_queue_count(dev));
sc->queues = malloc(sc->num_queues * sizeof(struct ntb_net_queue),
M_DEVBUF, M_WAITOK | M_ZERO);
sc->mtu = INT_MAX;
@@ -152,8 +153,7 @@ ntb_net_attach(device_t dev)
q = &sc->queues[i];
q->sc = sc;
q->ifp = ifp;
- q->qp = ntb_transport_create_queue(q,
- device_get_parent(dev), &handlers);
+ q->qp = ntb_transport_create_queue(dev, i, &handlers, q);
if (q->qp == NULL)
break;
sc->mtu = imin(sc->mtu, ntb_transport_max_size(q->qp));
@@ -167,6 +167,7 @@ ntb_net_attach(device_t dev)
callout_init(&q->queue_full, 1);
}
sc->num_queues = i;
+ device_printf(dev, "%d queue(s)\n", sc->num_queues);
if_setinitfn(ifp, ntb_net_init);
if_setsoftc(ifp, sc);
@@ -492,10 +493,9 @@ static void
create_random_local_eui48(u_char *eaddr)
{
static uint8_t counter = 0;
- uint32_t seed = ticks;
eaddr[0] = EUI48_LOCALLY_ADMINISTERED;
- memcpy(&eaddr[1], &seed, sizeof(uint32_t));
+ arc4rand(&eaddr[1], 4, 0);
eaddr[5] = counter++;
}
diff --git a/sys/dev/ntb/ntb.c b/sys/dev/ntb/ntb.c
index 1973cbf0fa28..44c0c618646c 100644
--- a/sys/dev/ntb/ntb.c
+++ b/sys/dev/ntb/ntb.c
@@ -31,6 +31,8 @@ __FBSDID("$FreeBSD$");
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bus.h>
+#include <sys/rmlock.h>
+#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/sysctl.h>
@@ -39,4 +41,422 @@ __FBSDID("$FreeBSD$");
devclass_t ntb_hw_devclass;
SYSCTL_NODE(_hw, OID_AUTO, ntb, CTLFLAG_RW, 0, "NTB sysctls");
+struct ntb_child {
+ device_t dev;
+ int enabled;
+ int mwoff;
+ int mwcnt;
+ int spadoff;
+ int spadcnt;
+ int dboff;
+ int dbmask;
+ void *ctx;
+ const struct ntb_ctx_ops *ctx_ops;
+ struct rmlock ctx_lock;
+ struct ntb_child *next;
+};
+
+int
+ntb_register_device(device_t dev)
+{
+ struct ntb_child **cpp = device_get_softc(dev);
+ struct ntb_child *nc;
+ int i, mw, mwu, mwt, spad, spadu, spadt, db, dbu, dbt;
+ char cfg[128] = "";
+ char buf[32];
+ char *n, *np, *c, *p, *name;
+
+ mwu = 0;
+ mwt = NTB_MW_COUNT(dev);
+ spadu = 0;
+ spadt = NTB_SPAD_COUNT(dev);
+ dbu = 0;
+ dbt = flsll(NTB_DB_VALID_MASK(dev));
+
+ device_printf(dev, "%d memory windows, %d scratchpads, "
+ "%d doorbells\n", mwt, spadt, dbt);
+
+ snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
+ device_get_unit(dev));
+ TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
+ n = cfg;
+ i = 0;
+ while ((c = strsep(&n, ",")) != NULL) {
+ np = c;
+ name = strsep(&np, ":");
+ if (name != NULL && name[0] == 0)
+ name = NULL;
+ p = strsep(&np, ":");
+ mw = (p && p[0] != 0) ? strtol(p, NULL, 10) : mwt - mwu;
+ p = strsep(&np, ":");
+ spad = (p && p[0] != 0) ? strtol(p, NULL, 10) : spadt - spadu;
+ db = (np && np[0] != 0) ? strtol(np, NULL, 10) : dbt - dbu;
+
+ if (mw > mwt - mwu || spad > spadt - spadu || db > dbt - dbu) {
+ device_printf(dev, "Not enough resources for config\n");
+ break;
+ }
+
+ nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
+ nc->mwoff = mwu;
+ nc->mwcnt = mw;
+ nc->spadoff = spadu;
+ nc->spadcnt = spad;
+ nc->dboff = dbu;
+ nc->dbmask = (db == 0) ? 0 : (0xffffffffffffffff >> (64 - db));
+ rm_init(&nc->ctx_lock, "ntb ctx");
+ nc->dev = device_add_child(dev, name, -1);
+ if (nc->dev == NULL) {
+ ntb_unregister_device(dev);
+ return (ENOMEM);
+ }
+ device_set_ivars(nc->dev, nc);
+ *cpp = nc;
+ cpp = &nc->next;
+
+ if (bootverbose) {
+ device_printf(dev, "%d \"%s\":", i, name);
+ if (mw > 0) {
+ printf(" memory windows %d", mwu);
+ if (mw > 1)
+ printf("-%d", mwu + mw - 1);
+ }
+ if (spad > 0) {
+ printf(" scratchpads %d", spadu);
+ if (spad > 1)
+ printf("-%d", spadu + spad - 1);
+ }
+ if (db > 0) {
+ printf(" doorbells %d", dbu);
+ if (db > 1)
+ printf("-%d", dbu + db - 1);
+ }
+ printf("\n");
+ }
+
+ mwu += mw;
+ spadu += spad;
+ dbu += db;
+ i++;
+ }
+
+ bus_generic_attach(dev);
+ return (0);
+}
+
+int
+ntb_unregister_device(device_t dev)
+{
+ struct ntb_child **cpp = device_get_softc(dev);
+ struct ntb_child *nc;
+ int error = 0;
+
+ while ((nc = *cpp) != NULL) {
+ *cpp = (*cpp)->next;
+ error = device_delete_child(dev, nc->dev);
+ if (error)
+ break;
+ rm_destroy(&nc->ctx_lock);
+ free(nc, M_DEVBUF);
+ }
+ return (error);
+}
+
+void
+ntb_link_event(device_t dev)
+{
+ struct ntb_child **cpp = device_get_softc(dev);
+ struct ntb_child *nc;
+ struct rm_priotracker ctx_tracker;
+
+ for (nc = *cpp; nc != NULL; nc = nc->next) {
+ rm_rlock(&nc->ctx_lock, &ctx_tracker);
+ if (nc->ctx_ops != NULL && nc->ctx_ops->link_event != NULL)
+ nc->ctx_ops->link_event(nc->ctx);
+ rm_runlock(&nc->ctx_lock, &ctx_tracker);
+ }
+}
+
+void
+ntb_db_event(device_t dev, uint32_t vec)
+{
+ struct ntb_child **cpp = device_get_softc(dev);
+ struct ntb_child *nc;
+ struct rm_priotracker ctx_tracker;
+
+ for (nc = *cpp; nc != NULL; nc = nc->next) {
+ rm_rlock(&nc->ctx_lock, &ctx_tracker);
+ if (nc->ctx_ops != NULL && nc->ctx_ops->db_event != NULL)
+ nc->ctx_ops->db_event(nc->ctx, vec);
+ rm_runlock(&nc->ctx_lock, &ctx_tracker);
+ }
+}
+
+bool
+ntb_link_is_up(device_t ntb, enum ntb_speed *speed, enum ntb_width *width)
+{
+
+ return (NTB_LINK_IS_UP(device_get_parent(ntb), speed, width));
+}
+
+int
+ntb_link_enable(device_t ntb, enum ntb_speed speed, enum ntb_width width)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+ struct ntb_child **cpp = device_get_softc(device_get_parent(nc->dev));
+ struct ntb_child *nc1;
+
+ for (nc1 = *cpp; nc1 != NULL; nc1 = nc1->next) {
+ if (nc1->enabled) {
+ nc->enabled = 1;
+ return (0);
+ }
+ }
+ nc->enabled = 1;
+ return (NTB_LINK_ENABLE(device_get_parent(ntb), speed, width));
+}
+
+int
+ntb_link_disable(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+ struct ntb_child **cpp = device_get_softc(device_get_parent(nc->dev));
+ struct ntb_child *nc1;
+
+ if (!nc->enabled)
+ return (0);
+ nc->enabled = 0;
+ for (nc1 = *cpp; nc1 != NULL; nc1 = nc1->next) {
+ if (nc1->enabled)
+ return (0);
+ }
+ return (NTB_LINK_DISABLE(device_get_parent(ntb)));
+}
+
+bool
+ntb_link_enabled(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (nc->enabled && NTB_LINK_ENABLED(device_get_parent(ntb)));
+}
+
+int
+ntb_set_ctx(device_t ntb, void *ctx, const struct ntb_ctx_ops *ctx_ops)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ if (ctx == NULL || ctx_ops == NULL)
+ return (EINVAL);
+
+ rm_wlock(&nc->ctx_lock);
+ if (nc->ctx_ops != NULL) {
+ rm_wunlock(&nc->ctx_lock);
+ return (EINVAL);
+ }
+ nc->ctx = ctx;
+ nc->ctx_ops = ctx_ops;
+ rm_wunlock(&nc->ctx_lock);
+
+ return (0);
+}
+
+void *
+ntb_get_ctx(device_t ntb, const struct ntb_ctx_ops **ctx_ops)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ KASSERT(nc->ctx != NULL && nc->ctx_ops != NULL, ("bogus"));
+ if (ctx_ops != NULL)
+ *ctx_ops = nc->ctx_ops;
+ return (nc->ctx);
+}
+
+void
+ntb_clear_ctx(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ rm_wlock(&nc->ctx_lock);
+ nc->ctx = NULL;
+ nc->ctx_ops = NULL;
+ rm_wunlock(&nc->ctx_lock);
+}
+
+uint8_t
+ntb_mw_count(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (nc->mwcnt);
+}
+
+int
+ntb_mw_get_range(device_t ntb, unsigned mw_idx, vm_paddr_t *base,
+ caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
+ bus_addr_t *plimit)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_MW_GET_RANGE(device_get_parent(ntb), mw_idx + nc->mwoff,
+ base, vbase, size, align, align_size, plimit));
+}
+
+int
+ntb_mw_set_trans(device_t ntb, unsigned mw_idx, bus_addr_t addr, size_t size)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_MW_SET_TRANS(device_get_parent(ntb), mw_idx + nc->mwoff,
+ addr, size));
+}
+
+int
+ntb_mw_clear_trans(device_t ntb, unsigned mw_idx)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_MW_CLEAR_TRANS(device_get_parent(ntb), mw_idx + nc->mwoff));
+}
+
+int
+ntb_mw_get_wc(device_t ntb, unsigned mw_idx, vm_memattr_t *mode)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_MW_GET_WC(device_get_parent(ntb), mw_idx + nc->mwoff, mode));
+}
+
+int
+ntb_mw_set_wc(device_t ntb, unsigned mw_idx, vm_memattr_t mode)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_MW_SET_WC(device_get_parent(ntb), mw_idx + nc->mwoff, mode));
+}
+
+uint8_t
+ntb_spad_count(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (nc->spadcnt);
+}
+
+void
+ntb_spad_clear(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+ unsigned i;
+
+ for (i = 0; i < nc->spadcnt; i++)
+ NTB_SPAD_WRITE(device_get_parent(ntb), i + nc->spadoff, 0);
+}
+
+int
+ntb_spad_write(device_t ntb, unsigned int idx, uint32_t val)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_SPAD_WRITE(device_get_parent(ntb), idx + nc->spadoff, val));
+}
+
+int
+ntb_spad_read(device_t ntb, unsigned int idx, uint32_t *val)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_SPAD_READ(device_get_parent(ntb), idx + nc->spadoff, val));
+}
+
+int
+ntb_peer_spad_write(device_t ntb, unsigned int idx, uint32_t val)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_PEER_SPAD_WRITE(device_get_parent(ntb), idx + nc->spadoff,
+ val));
+}
+
+int
+ntb_peer_spad_read(device_t ntb, unsigned int idx, uint32_t *val)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_PEER_SPAD_READ(device_get_parent(ntb), idx + nc->spadoff,
+ val));
+}
+
+uint64_t
+ntb_db_valid_mask(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (nc->dbmask);
+}
+
+int
+ntb_db_vector_count(device_t ntb)
+{
+
+ return (NTB_DB_VECTOR_COUNT(device_get_parent(ntb)));
+}
+
+uint64_t
+ntb_db_vector_mask(device_t ntb, uint32_t vector)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return ((NTB_DB_VECTOR_MASK(device_get_parent(ntb), vector)
+ >> nc->dboff) & nc->dbmask);
+}
+
+int
+ntb_peer_db_addr(device_t ntb, bus_addr_t *db_addr, vm_size_t *db_size)
+{
+
+ return (NTB_PEER_DB_ADDR(device_get_parent(ntb), db_addr, db_size));
+}
+
+void
+ntb_db_clear(device_t ntb, uint64_t bits)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_DB_CLEAR(device_get_parent(ntb), bits << nc->dboff));
+}
+
+void
+ntb_db_clear_mask(device_t ntb, uint64_t bits)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_DB_CLEAR_MASK(device_get_parent(ntb), bits << nc->dboff));
+}
+
+uint64_t
+ntb_db_read(device_t ntb)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return ((NTB_DB_READ(device_get_parent(ntb)) >> nc->dboff)
+ & nc->dbmask);
+}
+
+void
+ntb_db_set_mask(device_t ntb, uint64_t bits)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_DB_SET_MASK(device_get_parent(ntb), bits << nc->dboff));
+}
+
+void
+ntb_peer_db_set(device_t ntb, uint64_t bits)
+{
+ struct ntb_child *nc = device_get_ivars(ntb);
+
+ return (NTB_PEER_DB_SET(device_get_parent(ntb), bits << nc->dboff));
+}
+
MODULE_VERSION(ntb, 1);
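[Editor's note] In ntb_register_device() above, the hint.<driver>.<unit>.config tunable is a comma-separated list of "name:mw:spad:db" tuples that partition the hardware among child devices, with empty fields defaulting to all remaining resources. A standalone sketch of the same strsep()-based parse over a hypothetical hint value:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int
main(void)
{
	/* Hypothetical hint value: two children splitting the hardware. */
	char cfg[] = "ntb_transport:1:4:16,:1:4:16";
	char *n = cfg, *c, *np, *p, *name;

	while ((c = strsep(&n, ",")) != NULL) {
		np = c;
		name = strsep(&np, ":");
		if (name != NULL && name[0] == '\0')
			name = NULL;	/* empty name: any driver may attach */
		p = strsep(&np, ":");
		int mw = (p && p[0] != '\0') ? strtol(p, NULL, 10) : -1;
		p = strsep(&np, ":");
		int spad = (p && p[0] != '\0') ? strtol(p, NULL, 10) : -1;
		int db = (np && np[0] != '\0') ? strtol(np, NULL, 10) : -1;

		printf("child \"%s\": mw=%d spad=%d db=%d (-1 = remainder)\n",
		    name != NULL ? name : "", mw, spad, db);
	}
	return (0);
}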
diff --git a/sys/dev/ntb/ntb.h b/sys/dev/ntb/ntb.h
index 5218cbd7be39..8593c65b4cf0 100644
--- a/sys/dev/ntb/ntb.h
+++ b/sys/dev/ntb/ntb.h
@@ -34,4 +34,376 @@
extern devclass_t ntb_hw_devclass;
SYSCTL_DECL(_hw_ntb);
+int ntb_register_device(device_t ntb);
+int ntb_unregister_device(device_t ntb);
+
+/*
+ * ntb_link_event() - notify driver context of a change in link status
+ * @ntb: NTB device context
+ *
+ * Notify the driver context that the link status may have changed. The driver
+ * should call ntb_link_is_up() to get the current status.
+ */
+void ntb_link_event(device_t ntb);
+
+/*
+ * ntb_db_event() - notify driver context of a doorbell event
+ * @ntb: NTB device context
+ * @vector: Interrupt vector number
+ *
+ * Notify the driver context of a doorbell event. If hardware supports
+ * multiple interrupt vectors for doorbells, the vector number indicates which
+ * vector received the interrupt. The vector number is relative to the first
+ * vector used for doorbells, starting at zero, and must be less than
+ * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
+ * doorbell bits need service, and ntb_db_vector_mask() to determine which of
+ * those bits are associated with the vector number.
+ */
+void ntb_db_event(device_t ntb, uint32_t vec);
+
+/*
+ * ntb_link_is_up() - get the current ntb link state
+ * @ntb: NTB device context
+ * @speed: OUT - The link speed expressed as PCIe generation number
+ * @width: OUT - The link width expressed as the number of PCIe lanes
+ *
+ * RETURNS: true or false based on the hardware link state
+ */
+bool ntb_link_is_up(device_t ntb, enum ntb_speed *speed, enum ntb_width *width);
+
+/*
+ * ntb_link_enable() - enable the link on the secondary side of the ntb
+ * @ntb: NTB device context
+ * @speed: The maximum link speed expressed as PCIe generation number[0]
+ * @width: The maximum link width expressed as the number of PCIe lanes[0]
+ *
+ * Enable the link on the secondary side of the ntb. This can only be done
+ * from the primary side of the ntb in primary or b2b topology. The ntb device
+ * should train the link to its maximum speed and width, or the requested speed
+ * and width, whichever is smaller, if supported.
+ *
+ * Return: Zero on success, otherwise an error number.
+ *
+ * [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
+ * and width input will be ignored.
+ */
+int ntb_link_enable(device_t ntb, enum ntb_speed speed, enum ntb_width width);
+
+/*
+ * ntb_link_disable() - disable the link on the secondary side of the ntb
+ * @ntb: NTB device context
+ *
+ * Disable the link on the secondary side of the ntb. This can only be done
+ * from the primary side of the ntb in primary or b2b topology. The ntb device
+ * should disable the link. Returning from this call must indicate that a
+ * barrier has passed, though with no more writes may pass in either direction
+ * across the link, except if this call returns an error number.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+int ntb_link_disable(device_t ntb);
+
+/*
+ * ntb_link_enabled() - get the enable status of the link on the secondary side
+ */
+bool ntb_link_enabled(device_t ntb);
+
+/*
+ * ntb_set_ctx() - associate a driver context with an ntb device
+ * @ntb: NTB device context
+ * @ctx: Driver context
+ * @ctx_ops: Driver context operations
+ *
+ * Associate a driver context and operations with an ntb device. The context is
+ * provided by the client driver, and the driver may associate a different
+ * context with each ntb device.
+ *
+ * Return: Zero if the context is associated, otherwise an error number.
+ */
+int ntb_set_ctx(device_t ntb, void *ctx, const struct ntb_ctx_ops *ctx_ops);
+
+/*
+ * ntb_get_ctx() - get a driver context associated with an ntb device
+ * @ntb: NTB device context
+ * @ctx_ops: Driver context operations
+ *
+ * Get a driver context and operations associated with an ntb device.
+ */
+void * ntb_get_ctx(device_t ntb, const struct ntb_ctx_ops **ctx_ops);
+
+/*
+ * ntb_clear_ctx() - disassociate any driver context from an ntb device
+ * @ntb: NTB device context
+ *
+ * Clear any association that may exist between a driver context and the ntb
+ * device.
+ */
+void ntb_clear_ctx(device_t ntb);
+
+/*
+ * ntb_mw_count() - Get the number of memory windows available for KPI
+ * consumers.
+ *
+ * (Excludes any MW wholly reserved for register access.)
+ */
+uint8_t ntb_mw_count(device_t ntb);
+
+/*
+ * ntb_mw_get_range() - get the range of a memory window
+ * @ntb: NTB device context
+ * @idx: Memory window number
+ * @base: OUT - the base address for mapping the memory window
+ * @vbase: OUT - the kernel virtual address of the memory window
+ * @size: OUT - the size for mapping the memory window
+ * @align: OUT - the base alignment for translating the memory window
+ * @align_size: OUT - the size alignment for translating the memory window
+ * @plimit: OUT - the upper limit on translation addresses (e.g. for 32-bit BARs)
+ *
+ * Get the range of a memory window. NULL may be given for any output
+ * parameter if the value is not needed. The base and size may be used for
+ * mapping the memory window, to access the peer memory. The alignment and
+ * size may be used for translating the memory window, for the peer to access
+ * memory on the local system.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+int ntb_mw_get_range(device_t ntb, unsigned mw_idx, vm_paddr_t *base,
+ caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
+ bus_addr_t *plimit);
+
+/*
+ * ntb_mw_set_trans() - set the translation of a memory window
+ * @ntb: NTB device context
+ * @idx: Memory window number
+ * @addr: The dma address local memory to expose to the peer
+ * @size: The size of the local memory to expose to the peer
+ *
+ * Set the translation of a memory window. The peer may access local memory
+ * through the window starting at the address, up to the size. The address
+ * must be aligned to the alignment specified by ntb_mw_get_range(). The size
+ * must be aligned to the size alignment specified by ntb_mw_get_range(). The
+ * address must be below the plimit specified by ntb_mw_get_range() (i.e. for
+ * 32-bit BARs).
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+int ntb_mw_set_trans(device_t ntb, unsigned mw_idx, bus_addr_t addr,
+ size_t size);
+
+/*
+ * ntb_mw_clear_trans() - clear the translation of a memory window
+ * @ntb: NTB device context
+ * @idx: Memory window number
+ *
+ * Clear the translation of a memory window. The peer may no longer access
+ * local memory through the window.
+ *
+ * Return: Zero on success, otherwise an error number.
+ */
+int ntb_mw_clear_trans(device_t ntb, unsigned mw_idx);
+
+/*
+ * ntb_mw_get_wc - Get the write-combine status of a memory window
+ *
+ * Returns: Zero on success, setting *mode; otherwise an error number (e.g. if
+ * idx is an invalid memory window).
+ *
+ * Mode is a VM_MEMATTR_* type.
+ */
+int ntb_mw_get_wc(device_t ntb, unsigned mw_idx, vm_memattr_t *mode);
+
+/*
+ * ntb_mw_set_wc - Set the write-combine status of a memory window
+ *
+ * If 'mode' matches the current status, this does nothing and succeeds. Mode
+ * is a VM_MEMATTR_* type.
+ *
+ * Returns: Zero on success, setting the caching attribute on the virtual
+ * mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
+ * memory window, or if changing the caching attribute fails).
+ */
+int ntb_mw_set_wc(device_t ntb, unsigned mw_idx, vm_memattr_t mode);
+
+/*
+ * ntb_spad_count() - get the number of scratchpad registers usable
+ * @ntb: NTB device context
+ *
+ * This function returns the number of 32bit scratchpad registers usable by
+ * the upper layer.
+ *
+ * RETURNS: total number of scratch pad registers available
+ */
+uint8_t ntb_spad_count(device_t ntb);
+
+/*
+ * ntb_spad_clear() - zero local scratchpad registers
+ * @ntb: NTB device context
+ *
+ * This function overwrites all local scratchpad registers with zeroes.
+ */
+void ntb_spad_clear(device_t ntb);
+
+/*
+ * ntb_spad_write() - write to the local scratchpad register
+ * @ntb: NTB device context
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the local (primary/internal) side.
+ *
+ * RETURNS: An appropriate ERRNO error value on error, or zero for success.
+ */
+int ntb_spad_write(device_t ntb, unsigned int idx, uint32_t val);
+
+/*
+ * ntb_spad_read() - read from the local scratchpad register
+ * @ntb: NTB device context
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the local (primary/internal) side.
+ *
+ * RETURNS: An appropriate ERRNO error value on error, or zero for success.
+ */
+int ntb_spad_read(device_t ntb, unsigned int idx, uint32_t *val);
+
+/*
+ * ntb_peer_spad_write() - write to the secondary scratchpad register
+ * @ntb: NTB device context
+ * @idx: index to the scratchpad register, 0 based
+ * @val: the data value to put into the register
+ *
+ * This function allows writing of a 32bit value to the indexed scratchpad
+ * register. The register resides on the secondary (external) side.
+ *
+ * RETURNS: An appropriate ERRNO error value on error, or zero for success.
+ */
+int ntb_peer_spad_write(device_t ntb, unsigned int idx, uint32_t val);
+
+/*
+ * ntb_peer_spad_read() - read from the peer scratchpad register
+ * @ntb: NTB device context
+ * @idx: index to scratchpad register, 0 based
+ * @val: pointer to 32bit integer for storing the register value
+ *
+ * This function allows reading of the 32bit scratchpad register on
+ * the peer's secondary (external) side.
+ *
+ * RETURNS: An appropriate ERRNO error value on error, or zero for success.
+ */
+int ntb_peer_spad_read(device_t ntb, unsigned int idx, uint32_t *val);
+
+/*
+ * ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
+ * @ntb: NTB device context
+ *
+ * Hardware may support different number or arrangement of doorbell bits.
+ *
+ * Return: A mask of doorbell bits supported by the ntb.
+ */
+uint64_t ntb_db_valid_mask(device_t ntb);
+
+/*
+ * ntb_db_vector_count() - get the number of doorbell interrupt vectors
+ * @ntb: NTB device context.
+ *
+ * Hardware may support different number of interrupt vectors.
+ *
+ * Return: The number of doorbell interrupt vectors.
+ */
+int ntb_db_vector_count(device_t ntb);
+
+/*
+ * ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
+ * @ntb: NTB device context
+ * @vector: Doorbell vector number
+ *
+ * Each interrupt vector may have a different number or arrangement of bits.
+ *
+ * Return: A mask of doorbell bits serviced by a vector.
+ */
+uint64_t ntb_db_vector_mask(device_t ntb, uint32_t vector);
+
+/*
+ * ntb_peer_db_addr() - address and size of the peer doorbell register
+ * @ntb: NTB device context.
+ * @db_addr: OUT - The address of the peer doorbell register.
+ * @db_size: OUT - The number of bytes to write to the peer doorbell register.
+ *
+ * Return the address of the peer doorbell register. This may be used, for
+ * example, by drivers that offload memory copy operations to a dma engine.
+ * The drivers may wish to ring the peer doorbell at the completion of memory
+ * copy operations. For efficiency, and to simplify ordering of operations
+ * between the dma memory copies and the ringing doorbell, the driver may
+ * append one additional dma memory copy with the doorbell register as the
+ * destination, after the memory copy operations.
+ *
+ * Return: Zero on success, otherwise an error number.
+ *
+ * Note that writing the peer doorbell via a memory window will *not* generate
+ * an interrupt on the remote host; that must be done separately.
+ */
+int ntb_peer_db_addr(device_t ntb, bus_addr_t *db_addr, vm_size_t *db_size);
+
+/*
+ * ntb_db_clear() - clear bits in the local doorbell register
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell register, arming the bits for the next
+ * doorbell.
+ */
+void ntb_db_clear(device_t ntb, uint64_t bits);
+
+/*
+ * ntb_db_clear_mask() - clear bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell bits to clear.
+ *
+ * Clear bits in the local doorbell mask register, allowing doorbell interrupts
+ * to be generated for those doorbell bits. If a doorbell bit is already
+ * set at the time the mask is cleared, and the corresponding mask bit is
+ * changed from set to clear, then the ntb driver must ensure that
+ * ntb_db_event() is called. If the hardware does not generate the interrupt
+ * on clearing the mask bit, then the driver must call ntb_db_event() anyway.
+ */
+void ntb_db_clear_mask(device_t ntb, uint64_t bits);
+
+/*
+ * ntb_db_read() - read the local doorbell register
+ * @ntb: NTB device context.
+ *
+ * Read the local doorbell register, and return the bits that are set.
+ *
+ * Return: The bits currently set in the local doorbell register.
+ */
+uint64_t ntb_db_read(device_t ntb);
+
+/*
+ * ntb_db_set_mask() - set bits in the local doorbell mask
+ * @ntb: NTB device context.
+ * @db_bits: Doorbell mask bits to set.
+ *
+ * Set bits in the local doorbell mask register, preventing doorbell interrupts
+ * from being generated for those doorbell bits. Bits that were already set
+ * must remain set.
+ */
+void ntb_db_set_mask(device_t ntb, uint64_t bits);
+
+/*
+ * ntb_peer_db_set() - Set the doorbell on the secondary/external side
+ * @ntb: NTB device context
+ * @bits: doorbell bits to ring
+ *
+ * This function allows triggering of a doorbell on the secondary/external
+ * side that will initiate an interrupt on the remote host.
+ */
+void ntb_peer_db_set(device_t ntb, uint64_t bits);
+
#endif /* _NTB_H_ */
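Taken together, the doorbell calls above compose into the usual service loop.
A minimal sketch of a client's doorbell callback follows; only the ntb_*
calls come from this header, while the callback name and its registration are
assumed:

	static void
	example_db_event(void *ctx, uint32_t vec)
	{
		device_t dev = ctx;	/* NTB device saved at registration */
		uint64_t db;

		/* Bits this vector may have raised, per ntb_db_vector_mask(). */
		db = ntb_db_read(dev) & ntb_db_vector_mask(dev, vec);
		while (db != 0) {
			/* Re-arm before servicing so no ring is lost. */
			ntb_db_clear(dev, db);
			/* ... service each set doorbell bit here ... */
			db = ntb_db_read(dev) & ntb_db_vector_mask(dev, vec);
		}
	}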
diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.c b/sys/dev/ntb/ntb_hw/ntb_hw.c
index 071ef42b55e4..dac36997cdb1 100644
--- a/sys/dev/ntb/ntb_hw/ntb_hw.c
+++ b/sys/dev/ntb/ntb_hw/ntb_hw.c
@@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$");
#include <sys/pciio.h>
#include <sys/queue.h>
#include <sys/rman.h>
-#include <sys/rmlock.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <vm/vm.h>
@@ -201,6 +200,9 @@ struct ntb_msix_data {
};
struct ntb_softc {
+ /* ntb.c context. Do not move! Must go first! */
+ void *ntb_store;
+
device_t device;
enum ntb_device_type type;
uint32_t features;
@@ -219,10 +221,7 @@ struct ntb_softc {
struct callout heartbeat_timer;
struct callout lr_timer;
- void *ntb_ctx;
- const struct ntb_ctx_ops *ctx_ops;
struct ntb_vec *msix_vec;
- struct rmlock ctx_lock;
uint32_t ppd;
enum ntb_conn_type conn_type;
@@ -284,72 +283,74 @@ bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
}
#endif
-#define ntb_bar_read(SIZE, bar, offset) \
+#define intel_ntb_bar_read(SIZE, bar, offset) \
bus_space_read_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
ntb->bar_info[(bar)].pci_bus_handle, (offset))
-#define ntb_bar_write(SIZE, bar, offset, val) \
+#define intel_ntb_bar_write(SIZE, bar, offset, val) \
bus_space_write_ ## SIZE (ntb->bar_info[(bar)].pci_bus_tag, \
ntb->bar_info[(bar)].pci_bus_handle, (offset), (val))
-#define ntb_reg_read(SIZE, offset) ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
-#define ntb_reg_write(SIZE, offset, val) \
- ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
-#define ntb_mw_read(SIZE, offset) \
- ntb_bar_read(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), offset)
-#define ntb_mw_write(SIZE, offset, val) \
- ntb_bar_write(SIZE, ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
+#define intel_ntb_reg_read(SIZE, offset) \
+ intel_ntb_bar_read(SIZE, NTB_CONFIG_BAR, offset)
+#define intel_ntb_reg_write(SIZE, offset, val) \
+ intel_ntb_bar_write(SIZE, NTB_CONFIG_BAR, offset, val)
+#define intel_ntb_mw_read(SIZE, offset) \
+ intel_ntb_bar_read(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
+ offset)
+#define intel_ntb_mw_write(SIZE, offset, val) \
+ intel_ntb_bar_write(SIZE, intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx), \
offset, val)
-static int ntb_probe(device_t device);
-static int ntb_attach(device_t device);
-static int ntb_detach(device_t device);
-static uint64_t ntb_db_valid_mask(device_t dev);
-static void ntb_spad_clear(device_t dev);
-static uint64_t ntb_db_vector_mask(device_t dev, uint32_t vector);
-static bool ntb_link_is_up(device_t dev, enum ntb_speed *speed,
+static int intel_ntb_probe(device_t device);
+static int intel_ntb_attach(device_t device);
+static int intel_ntb_detach(device_t device);
+static uint64_t intel_ntb_db_valid_mask(device_t dev);
+static void intel_ntb_spad_clear(device_t dev);
+static uint64_t intel_ntb_db_vector_mask(device_t dev, uint32_t vector);
+static bool intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed,
enum ntb_width *width);
-static int ntb_link_enable(device_t dev, enum ntb_speed speed,
+static int intel_ntb_link_enable(device_t dev, enum ntb_speed speed,
enum ntb_width width);
-static int ntb_link_disable(device_t dev);
-static int ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
-static int ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);
+static int intel_ntb_link_disable(device_t dev);
+static int intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val);
+static int intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val);
-static unsigned ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
-static inline enum ntb_bar ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
+static unsigned intel_ntb_user_mw_to_idx(struct ntb_softc *, unsigned uidx);
+static inline enum ntb_bar intel_ntb_mw_to_bar(struct ntb_softc *, unsigned mw);
static inline bool bar_is_64bit(struct ntb_softc *, enum ntb_bar);
static inline void bar_get_xlat_params(struct ntb_softc *, enum ntb_bar,
uint32_t *base, uint32_t *xlat, uint32_t *lmt);
-static int ntb_map_pci_bars(struct ntb_softc *ntb);
-static int ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
+static int intel_ntb_map_pci_bars(struct ntb_softc *ntb);
+static int intel_ntb_mw_set_wc_internal(struct ntb_softc *, unsigned idx,
vm_memattr_t);
static void print_map_success(struct ntb_softc *, struct ntb_pci_bar_info *,
const char *);
static int map_mmr_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar);
static int map_memory_window_bar(struct ntb_softc *ntb,
struct ntb_pci_bar_info *bar);
-static void ntb_unmap_pci_bar(struct ntb_softc *ntb);
-static int ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
-static int ntb_init_isr(struct ntb_softc *ntb);
-static int ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
-static int ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
-static void ntb_teardown_interrupts(struct ntb_softc *ntb);
-static inline uint64_t ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
-static void ntb_interrupt(struct ntb_softc *, uint32_t vec);
+static void intel_ntb_unmap_pci_bar(struct ntb_softc *ntb);
+static int intel_ntb_remap_msix(device_t, uint32_t desired, uint32_t avail);
+static int intel_ntb_init_isr(struct ntb_softc *ntb);
+static int intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb);
+static int intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors);
+static void intel_ntb_teardown_interrupts(struct ntb_softc *ntb);
+static inline uint64_t intel_ntb_vec_mask(struct ntb_softc *, uint64_t db_vector);
+static void intel_ntb_interrupt(struct ntb_softc *, uint32_t vec);
static void ndev_vec_isr(void *arg);
static void ndev_irq_isr(void *arg);
static inline uint64_t db_ioread(struct ntb_softc *, uint64_t regoff);
static inline void db_iowrite(struct ntb_softc *, uint64_t regoff, uint64_t);
static inline void db_iowrite_raw(struct ntb_softc *, uint64_t regoff, uint64_t);
-static int ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
-static void ntb_free_msix_vec(struct ntb_softc *ntb);
-static void ntb_get_msix_info(struct ntb_softc *ntb);
-static void ntb_exchange_msix(void *);
-static struct ntb_hw_info *ntb_get_device_info(uint32_t device_id);
-static void ntb_detect_max_mw(struct ntb_softc *ntb);
-static int ntb_detect_xeon(struct ntb_softc *ntb);
-static int ntb_detect_atom(struct ntb_softc *ntb);
-static int ntb_xeon_init_dev(struct ntb_softc *ntb);
-static int ntb_atom_init_dev(struct ntb_softc *ntb);
-static void ntb_teardown_xeon(struct ntb_softc *ntb);
+static int intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors);
+static void intel_ntb_free_msix_vec(struct ntb_softc *ntb);
+static void intel_ntb_get_msix_info(struct ntb_softc *ntb);
+static void intel_ntb_exchange_msix(void *);
+static struct ntb_hw_info *intel_ntb_get_device_info(uint32_t device_id);
+static void intel_ntb_detect_max_mw(struct ntb_softc *ntb);
+static int intel_ntb_detect_xeon(struct ntb_softc *ntb);
+static int intel_ntb_detect_atom(struct ntb_softc *ntb);
+static int intel_ntb_xeon_init_dev(struct ntb_softc *ntb);
+static int intel_ntb_atom_init_dev(struct ntb_softc *ntb);
+static void intel_ntb_teardown_xeon(struct ntb_softc *ntb);
static void configure_atom_secondary_side_bars(struct ntb_softc *ntb);
static void xeon_reset_sbar_size(struct ntb_softc *, enum ntb_bar idx,
enum ntb_bar regbar);
@@ -362,15 +363,13 @@ static int xeon_setup_b2b_mw(struct ntb_softc *,
static inline bool link_is_up(struct ntb_softc *ntb);
static inline bool _xeon_link_is_up(struct ntb_softc *ntb);
static inline bool atom_link_is_err(struct ntb_softc *ntb);
-static inline enum ntb_speed ntb_link_sta_speed(struct ntb_softc *);
-static inline enum ntb_width ntb_link_sta_width(struct ntb_softc *);
+static inline enum ntb_speed intel_ntb_link_sta_speed(struct ntb_softc *);
+static inline enum ntb_width intel_ntb_link_sta_width(struct ntb_softc *);
static void atom_link_hb(void *arg);
-static void ntb_link_event(device_t dev);
-static void ntb_db_event(device_t dev, uint32_t vec);
static void recover_atom_link(void *arg);
-static bool ntb_poll_link(struct ntb_softc *ntb);
+static bool intel_ntb_poll_link(struct ntb_softc *ntb);
static void save_bar_parameters(struct ntb_pci_bar_info *bar);
-static void ntb_sysctl_init(struct ntb_softc *);
+static void intel_ntb_sysctl_init(struct ntb_softc *);
static int sysctl_handle_features(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS);
static int sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS);
@@ -380,7 +379,7 @@ static int sysctl_handle_register(SYSCTL_HANDLER_ARGS);
static unsigned g_ntb_hw_debug_level;
SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
&g_ntb_hw_debug_level, 0, "ntb_hw log level -- higher is more verbose");
-#define ntb_printf(lvl, ...) do { \
+#define intel_ntb_printf(lvl, ...) do { \
if ((lvl) <= g_ntb_hw_debug_level) { \
device_printf(ntb->device, __VA_ARGS__); \
} \
@@ -403,7 +402,7 @@ SYSCTL_UINT(_hw_ntb, OID_AUTO, default_mw_pat, CTLFLAG_RDTUN,
"UC-: " __XSTRING(_NTB_PAT_UCM));
static inline vm_memattr_t
-ntb_pat_flags(void)
+intel_ntb_pat_flags(void)
{
switch (g_ntb_mw_pat) {
@@ -429,7 +428,7 @@ ntb_pat_flags(void)
* anywhere better yet.
*/
static inline const char *
-ntb_vm_memattr_to_str(vm_memattr_t pat)
+intel_ntb_vm_memattr_to_str(vm_memattr_t pat)
{
switch (pat) {
@@ -621,11 +620,11 @@ MALLOC_DEFINE(M_NTB, "ntb_hw", "ntb_hw driver memory allocations");
* OS <-> Driver linkage functions
*/
static int
-ntb_probe(device_t device)
+intel_ntb_probe(device_t device)
{
struct ntb_hw_info *p;
- p = ntb_get_device_info(pci_get_devid(device));
+ p = intel_ntb_get_device_info(pci_get_devid(device));
if (p == NULL)
return (ENXIO);
@@ -634,14 +633,14 @@ ntb_probe(device_t device)
}
static int
-ntb_attach(device_t device)
+intel_ntb_attach(device_t device)
{
struct ntb_softc *ntb;
struct ntb_hw_info *p;
int error;
ntb = device_get_softc(device);
- p = ntb_get_device_info(pci_get_devid(device));
+ p = intel_ntb_get_device_info(pci_get_devid(device));
ntb->device = device;
ntb->type = p->type;
@@ -654,54 +653,52 @@ ntb_attach(device_t device)
callout_init(&ntb->lr_timer, 1);
callout_init(&ntb->peer_msix_work, 1);
mtx_init(&ntb->db_mask_lock, "ntb hw bits", NULL, MTX_SPIN);
- rm_init(&ntb->ctx_lock, "ntb ctx");
if (ntb->type == NTB_ATOM)
- error = ntb_detect_atom(ntb);
+ error = intel_ntb_detect_atom(ntb);
else
- error = ntb_detect_xeon(ntb);
+ error = intel_ntb_detect_xeon(ntb);
if (error != 0)
goto out;
- ntb_detect_max_mw(ntb);
+ intel_ntb_detect_max_mw(ntb);
pci_enable_busmaster(ntb->device);
- error = ntb_map_pci_bars(ntb);
+ error = intel_ntb_map_pci_bars(ntb);
if (error != 0)
goto out;
if (ntb->type == NTB_ATOM)
- error = ntb_atom_init_dev(ntb);
+ error = intel_ntb_atom_init_dev(ntb);
else
- error = ntb_xeon_init_dev(ntb);
+ error = intel_ntb_xeon_init_dev(ntb);
if (error != 0)
goto out;
- ntb_spad_clear(device);
+ intel_ntb_spad_clear(device);
- ntb_poll_link(ntb);
+ intel_ntb_poll_link(ntb);
- ntb_sysctl_init(ntb);
+ intel_ntb_sysctl_init(ntb);
/* Attach children to this controller */
- device_add_child(device, NULL, -1);
- bus_generic_attach(device);
+ error = ntb_register_device(device);
out:
if (error != 0)
- ntb_detach(device);
+ intel_ntb_detach(device);
return (error);
}
static int
-ntb_detach(device_t device)
+intel_ntb_detach(device_t device)
{
struct ntb_softc *ntb;
ntb = device_get_softc(device);
/* Detach & delete all children */
- device_delete_children(device);
+ ntb_unregister_device(device);
if (ntb->self_reg != NULL) {
DB_MASK_LOCK(ntb);
@@ -713,13 +710,12 @@ ntb_detach(device_t device)
callout_drain(&ntb->peer_msix_work);
pci_disable_busmaster(ntb->device);
if (ntb->type == NTB_XEON)
- ntb_teardown_xeon(ntb);
- ntb_teardown_interrupts(ntb);
+ intel_ntb_teardown_xeon(ntb);
+ intel_ntb_teardown_interrupts(ntb);
mtx_destroy(&ntb->db_mask_lock);
- rm_destroy(&ntb->ctx_lock);
- ntb_unmap_pci_bar(ntb);
+ intel_ntb_unmap_pci_bar(ntb);
return (0);
}
@@ -728,7 +724,7 @@ ntb_detach(device_t device)
* Driver internal routines
*/
static inline enum ntb_bar
-ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
+intel_ntb_mw_to_bar(struct ntb_softc *ntb, unsigned mw)
{
KASSERT(mw < ntb->mw_count,
@@ -784,7 +780,7 @@ bar_get_xlat_params(struct ntb_softc *ntb, enum ntb_bar bar, uint32_t *base,
}
static int
-ntb_map_pci_bars(struct ntb_softc *ntb)
+intel_ntb_map_pci_bars(struct ntb_softc *ntb)
{
int rc;
@@ -910,7 +906,7 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
* Optionally, mark MW BARs as anything other than UC to improve
* performance.
*/
- mapmode = ntb_pat_flags();
+ mapmode = intel_ntb_pat_flags();
if (mapmode == bar->map_mode)
return (0);
@@ -923,7 +919,7 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
(char *)bar->vbase + bar->size - 1,
(void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
- ntb_vm_memattr_to_str(mapmode));
+ intel_ntb_vm_memattr_to_str(mapmode));
} else
device_printf(ntb->device,
"Unable to mark BAR%d v:[%p-%p] p:[%p-%p] as "
@@ -931,13 +927,13 @@ map_memory_window_bar(struct ntb_softc *ntb, struct ntb_pci_bar_info *bar)
PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
(char *)bar->vbase + bar->size - 1,
(void *)bar->pbase, (void *)(bar->pbase + bar->size - 1),
- ntb_vm_memattr_to_str(mapmode), rc);
+ intel_ntb_vm_memattr_to_str(mapmode), rc);
/* Proceed anyway */
return (0);
}
static void
-ntb_unmap_pci_bar(struct ntb_softc *ntb)
+intel_ntb_unmap_pci_bar(struct ntb_softc *ntb)
{
struct ntb_pci_bar_info *current_bar;
int i;
@@ -952,7 +948,7 @@ ntb_unmap_pci_bar(struct ntb_softc *ntb)
}
static int
-ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
+intel_ntb_setup_msix(struct ntb_softc *ntb, uint32_t num_vectors)
{
uint32_t i;
int rc;
@@ -1005,7 +1001,7 @@ SYSCTL_INT(_hw_ntb, OID_AUTO, prefer_intx_to_remap, CTLFLAG_RDTUN,
* round-robin fashion.
*/
static int
-ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
+intel_ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
{
u_int *vectors;
uint32_t i;
@@ -1025,7 +1021,7 @@ ntb_remap_msix(device_t dev, uint32_t desired, uint32_t avail)
}
static int
-ntb_init_isr(struct ntb_softc *ntb)
+intel_ntb_init_isr(struct ntb_softc *ntb)
{
uint32_t desired_vectors, num_vectors;
int rc;
@@ -1051,7 +1047,7 @@ ntb_init_isr(struct ntb_softc *ntb)
num_vectors--;
if (rc == 0 && num_vectors < desired_vectors) {
- rc = ntb_remap_msix(ntb->device, desired_vectors,
+ rc = intel_ntb_remap_msix(ntb->device, desired_vectors,
num_vectors);
if (rc == 0)
num_vectors = desired_vectors;
@@ -1072,7 +1068,7 @@ ntb_init_isr(struct ntb_softc *ntb)
ntb->db_vec_count = 1;
ntb->db_vec_shift = XEON_DB_TOTAL_SHIFT;
- rc = ntb_setup_legacy_interrupt(ntb);
+ rc = intel_ntb_setup_legacy_interrupt(ntb);
} else {
if (num_vectors - 1 != XEON_NONLINK_DB_MSIX_BITS &&
HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
@@ -1082,22 +1078,20 @@ ntb_init_isr(struct ntb_softc *ntb)
return (EINVAL);
}
- ntb_create_msix_vec(ntb, num_vectors);
- rc = ntb_setup_msix(ntb, num_vectors);
- if (rc == 0 && HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP))
- ntb_get_msix_info(ntb);
+ intel_ntb_create_msix_vec(ntb, num_vectors);
+ rc = intel_ntb_setup_msix(ntb, num_vectors);
}
if (rc != 0) {
device_printf(ntb->device,
"Error allocating interrupts: %d\n", rc);
- ntb_free_msix_vec(ntb);
+ intel_ntb_free_msix_vec(ntb);
}
return (rc);
}
static int
-ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
+intel_ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
{
int rc;
@@ -1124,7 +1118,7 @@ ntb_setup_legacy_interrupt(struct ntb_softc *ntb)
}
static void
-ntb_teardown_interrupts(struct ntb_softc *ntb)
+intel_ntb_teardown_interrupts(struct ntb_softc *ntb)
{
struct ntb_int_info *current_int;
int i;
@@ -1140,7 +1134,7 @@ ntb_teardown_interrupts(struct ntb_softc *ntb)
rman_get_rid(current_int->res), current_int->res);
}
- ntb_free_msix_vec(ntb);
+ intel_ntb_free_msix_vec(ntb);
pci_release_msi(ntb->device);
}
@@ -1153,11 +1147,11 @@ db_ioread(struct ntb_softc *ntb, uint64_t regoff)
{
if (ntb->type == NTB_ATOM)
- return (ntb_reg_read(8, regoff));
+ return (intel_ntb_reg_read(8, regoff));
KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
- return (ntb_reg_read(2, regoff));
+ return (intel_ntb_reg_read(2, regoff));
}
static inline void
@@ -1179,16 +1173,16 @@ db_iowrite_raw(struct ntb_softc *ntb, uint64_t regoff, uint64_t val)
{
if (ntb->type == NTB_ATOM) {
- ntb_reg_write(8, regoff, val);
+ intel_ntb_reg_write(8, regoff, val);
return;
}
KASSERT(ntb->type == NTB_XEON, ("bad ntb type"));
- ntb_reg_write(2, regoff, (uint16_t)val);
+ intel_ntb_reg_write(2, regoff, (uint16_t)val);
}
static void
-ntb_db_set_mask(device_t dev, uint64_t bits)
+intel_ntb_db_set_mask(device_t dev, uint64_t bits)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -1200,7 +1194,7 @@ ntb_db_set_mask(device_t dev, uint64_t bits)
}
static void
-ntb_db_clear_mask(device_t dev, uint64_t bits)
+intel_ntb_db_clear_mask(device_t dev, uint64_t bits)
{
struct ntb_softc *ntb = device_get_softc(dev);
uint64_t ibits;
@@ -1217,7 +1211,7 @@ ntb_db_clear_mask(device_t dev, uint64_t bits)
if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
/* Simulate fake interrupts if unmasked DB bits are set. */
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- if ((ibits & ntb_db_vector_mask(dev, i)) != 0)
+ if ((ibits & intel_ntb_db_vector_mask(dev, i)) != 0)
swi_sched(ntb->int_info[i].tag, 0);
}
} else {
@@ -1227,7 +1221,7 @@ ntb_db_clear_mask(device_t dev, uint64_t bits)
}
static uint64_t
-ntb_db_read(device_t dev)
+intel_ntb_db_read(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -1238,7 +1232,7 @@ ntb_db_read(device_t dev)
}
static void
-ntb_db_clear(device_t dev, uint64_t bits)
+intel_ntb_db_clear(device_t dev, uint64_t bits)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -1258,7 +1252,7 @@ ntb_db_clear(device_t dev, uint64_t bits)
}
static inline uint64_t
-ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
+intel_ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
{
uint64_t shift, mask;
@@ -1281,15 +1275,15 @@ ntb_vec_mask(struct ntb_softc *ntb, uint64_t db_vector)
}
static void
-ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
+intel_ntb_interrupt(struct ntb_softc *ntb, uint32_t vec)
{
uint64_t vec_mask;
ntb->last_ts = ticks;
- vec_mask = ntb_vec_mask(ntb, vec);
+ vec_mask = intel_ntb_vec_mask(ntb, vec);
if ((vec_mask & ntb->db_link_mask) != 0) {
- if (ntb_poll_link(ntb))
+ if (intel_ntb_poll_link(ntb))
ntb_link_event(ntb->device);
}
@@ -1318,18 +1312,18 @@ ndev_vec_isr(void *arg)
{
struct ntb_vec *nvec = arg;
- ntb_interrupt(nvec->ntb, nvec->num);
+ intel_ntb_interrupt(nvec->ntb, nvec->num);
}
static void
ndev_irq_isr(void *arg)
{
/* If we couldn't set up MSI-X, we only have the one vector. */
- ntb_interrupt(arg, 0);
+ intel_ntb_interrupt(arg, 0);
}
static int
-ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
+intel_ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
{
uint32_t i;
@@ -1344,7 +1338,7 @@ ntb_create_msix_vec(struct ntb_softc *ntb, uint32_t num_vectors)
}
static void
-ntb_free_msix_vec(struct ntb_softc *ntb)
+intel_ntb_free_msix_vec(struct ntb_softc *ntb)
{
if (ntb->msix_vec == NULL)
@@ -1355,7 +1349,7 @@ ntb_free_msix_vec(struct ntb_softc *ntb)
}
static void
-ntb_get_msix_info(struct ntb_softc *ntb)
+intel_ntb_get_msix_info(struct ntb_softc *ntb)
{
struct pci_devinfo *dinfo;
struct pcicfg_msix *msix;
@@ -1371,7 +1365,7 @@ ntb_get_msix_info(struct ntb_softc *ntb)
laddr = bus_read_4(msix->msix_table_res, offset +
PCI_MSIX_ENTRY_LOWER_ADDR);
- ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);
+ intel_ntb_printf(2, "local MSIX addr(%u): 0x%x\n", i, laddr);
KASSERT((laddr & MSI_INTEL_ADDR_BASE) == MSI_INTEL_ADDR_BASE,
("local MSIX addr 0x%x not in MSI base 0x%x", laddr,
@@ -1380,14 +1374,14 @@ ntb_get_msix_info(struct ntb_softc *ntb)
data = bus_read_4(msix->msix_table_res, offset +
PCI_MSIX_ENTRY_DATA);
- ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);
+ intel_ntb_printf(2, "local MSIX data(%u): 0x%x\n", i, data);
ntb->msix_data[i].nmd_data = data;
}
}
static struct ntb_hw_info *
-ntb_get_device_info(uint32_t device_id)
+intel_ntb_get_device_info(uint32_t device_id)
{
struct ntb_hw_info *ep = pci_ids;
@@ -1400,15 +1394,15 @@ ntb_get_device_info(uint32_t device_id)
}
static void
-ntb_teardown_xeon(struct ntb_softc *ntb)
+intel_ntb_teardown_xeon(struct ntb_softc *ntb)
{
if (ntb->reg != NULL)
- ntb_link_disable(ntb->device);
+ intel_ntb_link_disable(ntb->device);
}
static void
-ntb_detect_max_mw(struct ntb_softc *ntb)
+intel_ntb_detect_max_mw(struct ntb_softc *ntb)
{
if (ntb->type == NTB_ATOM) {
@@ -1423,7 +1417,7 @@ ntb_detect_max_mw(struct ntb_softc *ntb)
}
static int
-ntb_detect_xeon(struct ntb_softc *ntb)
+intel_ntb_detect_xeon(struct ntb_softc *ntb)
{
uint8_t ppd, conn_type;
@@ -1471,7 +1465,7 @@ ntb_detect_xeon(struct ntb_softc *ntb)
}
static int
-ntb_detect_atom(struct ntb_softc *ntb)
+intel_ntb_detect_atom(struct ntb_softc *ntb)
{
uint32_t ppd, conn_type;
@@ -1496,7 +1490,7 @@ ntb_detect_atom(struct ntb_softc *ntb)
}
static int
-ntb_xeon_init_dev(struct ntb_softc *ntb)
+intel_ntb_xeon_init_dev(struct ntb_softc *ntb)
{
int rc;
@@ -1521,9 +1515,9 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
ntb->fake_db_bell = 0;
ntb->msix_mw_idx = (ntb->mw_count + g_ntb_msix_idx) %
ntb->mw_count;
- ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
+ intel_ntb_printf(2, "Setting up MSIX mw idx %d means %u\n",
g_ntb_msix_idx, ntb->msix_mw_idx);
- rc = ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
+ rc = intel_ntb_mw_set_wc_internal(ntb, ntb->msix_mw_idx,
VM_MEMATTR_UNCACHEABLE);
KASSERT(rc == 0, ("shouldn't fail"));
} else if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
@@ -1536,9 +1530,9 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
*/
ntb->b2b_mw_idx = (ntb->mw_count + g_ntb_mw_idx) %
ntb->mw_count;
- ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
+ intel_ntb_printf(2, "Setting up b2b mw idx %d means %u\n",
g_ntb_mw_idx, ntb->b2b_mw_idx);
- rc = ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
+ rc = intel_ntb_mw_set_wc_internal(ntb, ntb->b2b_mw_idx,
VM_MEMATTR_UNCACHEABLE);
KASSERT(rc == 0, ("shouldn't fail"));
} else if (HAS_FEATURE(ntb, NTB_B2BDOORBELL_BIT14))
@@ -1564,7 +1558,7 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
return (rc);
/* Enable Bus Master and Memory Space on the secondary side */
- ntb_reg_write(2, XEON_SPCICMD_OFFSET,
+ intel_ntb_reg_write(2, XEON_SPCICMD_OFFSET,
PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
/*
@@ -1575,12 +1569,12 @@ ntb_xeon_init_dev(struct ntb_softc *ntb)
db_iowrite(ntb, ntb->self_reg->db_mask, ntb->db_mask);
DB_MASK_UNLOCK(ntb);
- rc = ntb_init_isr(ntb);
+ rc = intel_ntb_init_isr(ntb);
return (rc);
}
static int
-ntb_atom_init_dev(struct ntb_softc *ntb)
+intel_ntb_atom_init_dev(struct ntb_softc *ntb)
{
int error;
@@ -1607,15 +1601,15 @@ ntb_atom_init_dev(struct ntb_softc *ntb)
configure_atom_secondary_side_bars(ntb);
/* Enable Bus Master and Memory Space on the secondary side */
- ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
+ intel_ntb_reg_write(2, ATOM_SPCICMD_OFFSET,
PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);
- error = ntb_init_isr(ntb);
+ error = intel_ntb_init_isr(ntb);
if (error != 0)
return (error);
/* Initiate PCI-E link training */
- ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
callout_reset(&ntb->heartbeat_timer, 0, atom_link_hb, ntb);
@@ -1628,19 +1622,19 @@ configure_atom_secondary_side_bars(struct ntb_softc *ntb)
{
if (ntb->dev_type == NTB_DEV_USD) {
- ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
XEON_B2B_BAR4_ADDR64);
- ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
} else {
- ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR2XLAT_OFFSET,
XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
+ intel_ntb_reg_write(8, ATOM_PBAR4XLAT_OFFSET,
XEON_B2B_BAR4_ADDR64);
- ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
- ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR23_OFFSET, XEON_B2B_BAR2_ADDR64);
+ intel_ntb_reg_write(8, ATOM_MBAR45_OFFSET, XEON_B2B_BAR4_ADDR64);
}
}
@@ -1698,20 +1692,20 @@ xeon_set_sbar_base_and_limit(struct ntb_softc *ntb, uint64_t bar_addr,
}
if (!bar_is_64bit(ntb, idx)) {
- ntb_reg_write(4, base_reg, bar_addr);
- reg_val = ntb_reg_read(4, base_reg);
+ intel_ntb_reg_write(4, base_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(4, base_reg);
(void)reg_val;
- ntb_reg_write(4, lmt_reg, bar_addr);
- reg_val = ntb_reg_read(4, lmt_reg);
+ intel_ntb_reg_write(4, lmt_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(4, lmt_reg);
(void)reg_val;
} else {
- ntb_reg_write(8, base_reg, bar_addr);
- reg_val = ntb_reg_read(8, base_reg);
+ intel_ntb_reg_write(8, base_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(8, base_reg);
(void)reg_val;
- ntb_reg_write(8, lmt_reg, bar_addr);
- reg_val = ntb_reg_read(8, lmt_reg);
+ intel_ntb_reg_write(8, lmt_reg, bar_addr);
+ reg_val = intel_ntb_reg_read(8, lmt_reg);
(void)reg_val;
}
}
@@ -1723,11 +1717,11 @@ xeon_set_pbar_xlat(struct ntb_softc *ntb, uint64_t base_addr, enum ntb_bar idx)
bar = &ntb->bar_info[idx];
if (HAS_FEATURE(ntb, NTB_SPLIT_BAR) && idx >= NTB_B2B_BAR_2) {
- ntb_reg_write(4, bar->pbarxlat_off, base_addr);
- base_addr = ntb_reg_read(4, bar->pbarxlat_off);
+ intel_ntb_reg_write(4, bar->pbarxlat_off, base_addr);
+ base_addr = intel_ntb_reg_read(4, bar->pbarxlat_off);
} else {
- ntb_reg_write(8, bar->pbarxlat_off, base_addr);
- base_addr = ntb_reg_read(8, bar->pbarxlat_off);
+ intel_ntb_reg_write(8, bar->pbarxlat_off, base_addr);
+ base_addr = intel_ntb_reg_read(8, bar->pbarxlat_off);
}
(void)base_addr;
}
@@ -1746,7 +1740,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
b2b_bar_num = NTB_CONFIG_BAR;
ntb->b2b_off = 0;
} else {
- b2b_bar_num = ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
+ b2b_bar_num = intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx);
KASSERT(b2b_bar_num > 0 && b2b_bar_num < NTB_MAX_BARS,
("invalid b2b mw bar"));
@@ -1786,7 +1780,7 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
else
KASSERT(false, ("invalid bar"));
- ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
+ intel_ntb_reg_write(8, XEON_SBAR0BASE_OFFSET, bar_addr);
/*
* Other SBARs are normally hit by the PBAR xlat, except for the b2b
@@ -1807,8 +1801,8 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
NTB_B2B_BAR_2, b2b_bar_num);
/* Zero incoming translation addrs */
- ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
- ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_SBAR2XLAT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_SBAR4XLAT_OFFSET, 0);
if (HAS_FEATURE(ntb, NTB_SB01BASE_LOCKUP)) {
uint32_t xlat_reg, lmt_reg;
@@ -1818,26 +1812,26 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
* We point the chosen MSIX MW BAR xlat to remote LAPIC for
* workaround
*/
- bar_num = ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
+ bar_num = intel_ntb_mw_to_bar(ntb, ntb->msix_mw_idx);
bar_get_xlat_params(ntb, bar_num, NULL, &xlat_reg, &lmt_reg);
if (bar_is_64bit(ntb, bar_num)) {
- ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
- ntb->msix_xlat = ntb_reg_read(8, xlat_reg);
- ntb_reg_write(8, lmt_reg, 0);
+ intel_ntb_reg_write(8, xlat_reg, MSI_INTEL_ADDR_BASE);
+ ntb->msix_xlat = intel_ntb_reg_read(8, xlat_reg);
+ intel_ntb_reg_write(8, lmt_reg, 0);
} else {
- ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
- ntb->msix_xlat = ntb_reg_read(4, xlat_reg);
- ntb_reg_write(4, lmt_reg, 0);
+ intel_ntb_reg_write(4, xlat_reg, MSI_INTEL_ADDR_BASE);
+ ntb->msix_xlat = intel_ntb_reg_read(4, xlat_reg);
+ intel_ntb_reg_write(4, lmt_reg, 0);
}
ntb->peer_lapic_bar = &ntb->bar_info[bar_num];
}
- (void)ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
- (void)ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
+ (void)intel_ntb_reg_read(8, XEON_SBAR2XLAT_OFFSET);
+ (void)intel_ntb_reg_read(8, XEON_SBAR4XLAT_OFFSET);
/* Zero outgoing translation limits (whole bar size windows) */
- ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
- ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_PBAR2LMT_OFFSET, 0);
+ intel_ntb_reg_write(8, XEON_PBAR4LMT_OFFSET, 0);
/* Set outgoing translation offsets */
xeon_set_pbar_xlat(ntb, peer_addr->bar2_addr64, NTB_B2B_BAR_1);
@@ -1866,8 +1860,8 @@ xeon_setup_b2b_mw(struct ntb_softc *ntb, const struct ntb_b2b_addr *addr,
* B2B_XLAT_OFFSET is a 64-bit register but can only be written 32 bits
* at a time.
*/
- ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
- ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
+ intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETL, bar_addr & 0xffffffff);
+ intel_ntb_reg_write(4, XEON_B2B_XLAT_OFFSETU, bar_addr >> 32);
return (0);
}
@@ -1899,11 +1893,11 @@ atom_link_is_err(struct ntb_softc *ntb)
KASSERT(ntb->type == NTB_ATOM, ("ntb type"));
- status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
if ((status & ATOM_LTSSMSTATEJMP_FORCEDETECT) != 0)
return (true);
- status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
return ((status & ATOM_IBIST_ERR_OFLOW) != 0);
}
@@ -1926,7 +1920,7 @@ atom_link_hb(void *arg)
goto out;
}
- if (ntb_poll_link(ntb))
+ if (intel_ntb_poll_link(ntb))
ntb_link_event(ntb->device);
if (!link_is_up(ntb) && atom_link_is_err(ntb)) {
@@ -1945,137 +1939,47 @@ atom_perform_link_restart(struct ntb_softc *ntb)
uint32_t status;
/* Driver resets the NTB ModPhy lanes - magic! */
- ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
- ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0xe0);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x40);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG4, 0x60);
+ intel_ntb_reg_write(1, ATOM_MODPHY_PCSREG6, 0x60);
/* Driver waits 100ms to allow the NTB ModPhy to settle */
pause("ModPhy", hz / 10);
/* Clear AER Errors, write to clear */
- status = ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_ERRCORSTS_OFFSET);
status &= PCIM_AER_COR_REPLAY_ROLLOVER;
- ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_ERRCORSTS_OFFSET, status);
/* Clear unexpected electrical idle event in LTSSM, write to clear */
- status = ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMERRSTS0_OFFSET);
status |= ATOM_LTSSMERRSTS0_UNEXPECTEDEI;
- ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_LTSSMERRSTS0_OFFSET, status);
/* Clear DeSkew Buffer error, write to clear */
- status = ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_DESKEWSTS_OFFSET);
status |= ATOM_DESKEWSTS_DBERR;
- ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_DESKEWSTS_OFFSET, status);
- status = ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_IBSTERRRCRVSTS0_OFFSET);
status &= ATOM_IBIST_ERR_OFLOW;
- ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
+ intel_ntb_reg_write(4, ATOM_IBSTERRRCRVSTS0_OFFSET, status);
/* Releases the NTB state machine to allow the link to retrain */
- status = ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
+ status = intel_ntb_reg_read(4, ATOM_LTSSMSTATEJMP_OFFSET);
status &= ~ATOM_LTSSMSTATEJMP_FORCEDETECT;
- ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
-}
-
-static int
-ntb_set_ctx(device_t dev, void *ctx, const struct ntb_ctx_ops *ops)
-{
- struct ntb_softc *ntb = device_get_softc(dev);
-
- if (ctx == NULL || ops == NULL)
- return (EINVAL);
-
- rm_wlock(&ntb->ctx_lock);
- if (ntb->ctx_ops != NULL) {
- rm_wunlock(&ntb->ctx_lock);
- return (EINVAL);
- }
- ntb->ntb_ctx = ctx;
- ntb->ctx_ops = ops;
- rm_wunlock(&ntb->ctx_lock);
-
- return (0);
-}
-
-/*
- * It is expected that this will only be used from contexts where the ctx_lock
- * is not needed to protect ntb_ctx lifetime.
- */
-static void *
-ntb_get_ctx(device_t dev, const struct ntb_ctx_ops **ops)
-{
- struct ntb_softc *ntb = device_get_softc(dev);
-
- KASSERT(ntb->ntb_ctx != NULL && ntb->ctx_ops != NULL, ("bogus"));
- if (ops != NULL)
- *ops = ntb->ctx_ops;
- return (ntb->ntb_ctx);
-}
-
-static void
-ntb_clear_ctx(device_t dev)
-{
- struct ntb_softc *ntb = device_get_softc(dev);
-
- rm_wlock(&ntb->ctx_lock);
- ntb->ntb_ctx = NULL;
- ntb->ctx_ops = NULL;
- rm_wunlock(&ntb->ctx_lock);
-}
-
-/*
- * ntb_link_event() - notify driver context of a change in link status
- * @ntb: NTB device context
- *
- * Notify the driver context that the link status may have changed. The driver
- * should call ntb_link_is_up() to get the current status.
- */
-static void
-ntb_link_event(device_t dev)
-{
- struct ntb_softc *ntb = device_get_softc(dev);
- struct rm_priotracker ctx_tracker;
-
- rm_rlock(&ntb->ctx_lock, &ctx_tracker);
- if (ntb->ctx_ops != NULL && ntb->ctx_ops->link_event != NULL)
- ntb->ctx_ops->link_event(ntb->ntb_ctx);
- rm_runlock(&ntb->ctx_lock, &ctx_tracker);
-}
-
-/*
- * ntb_db_event() - notify driver context of a doorbell event
- * @ntb: NTB device context
- * @vector: Interrupt vector number
- *
- * Notify the driver context of a doorbell event. If hardware supports
- * multiple interrupt vectors for doorbells, the vector number indicates which
- * vector received the interrupt. The vector number is relative to the first
- * vector used for doorbells, starting at zero, and must be less than
- * ntb_db_vector_count(). The driver may call ntb_db_read() to check which
- * doorbell bits need service, and ntb_db_vector_mask() to determine which of
- * those bits are associated with the vector number.
- */
-static void
-ntb_db_event(device_t dev, uint32_t vec)
-{
- struct ntb_softc *ntb = device_get_softc(dev);
- struct rm_priotracker ctx_tracker;
-
- rm_rlock(&ntb->ctx_lock, &ctx_tracker);
- if (ntb->ctx_ops != NULL && ntb->ctx_ops->db_event != NULL)
- ntb->ctx_ops->db_event(ntb->ntb_ctx, vec);
- rm_runlock(&ntb->ctx_lock, &ctx_tracker);
+ intel_ntb_reg_write(4, ATOM_LTSSMSTATEJMP_OFFSET, status);
}
static int
-ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
+intel_ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
enum ntb_width width __unused)
{
struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
- ntb_printf(2, "%s\n", __func__);
+ intel_ntb_printf(2, "%s\n", __func__);
if (ntb->type == NTB_ATOM) {
pci_write_config(ntb->device, NTB_PPD_OFFSET,
@@ -2088,41 +1992,41 @@ ntb_link_enable(device_t dev, enum ntb_speed speed __unused,
return (0);
}
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
cntl &= ~(NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK);
cntl |= NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP;
cntl |= NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP;
if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
cntl |= NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP;
- ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+ intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
return (0);
}
static int
-ntb_link_disable(device_t dev)
+intel_ntb_link_disable(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
- ntb_printf(2, "%s\n", __func__);
+ intel_ntb_printf(2, "%s\n", __func__);
if (ntb->conn_type == NTB_CONN_TRANSPARENT) {
ntb_link_event(dev);
return (0);
}
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
cntl &= ~(NTB_CNTL_P2S_BAR23_SNOOP | NTB_CNTL_S2P_BAR23_SNOOP);
cntl &= ~(NTB_CNTL_P2S_BAR4_SNOOP | NTB_CNTL_S2P_BAR4_SNOOP);
if (HAS_FEATURE(ntb, NTB_SPLIT_BAR))
cntl &= ~(NTB_CNTL_P2S_BAR5_SNOOP | NTB_CNTL_S2P_BAR5_SNOOP);
cntl |= NTB_CNTL_LINK_DISABLE | NTB_CNTL_CFG_LOCK;
- ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
+ intel_ntb_reg_write(4, ntb->reg->ntb_ctl, cntl);
return (0);
}
static bool
-ntb_link_enabled(device_t dev)
+intel_ntb_link_enabled(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
uint32_t cntl;
@@ -2135,7 +2039,7 @@ ntb_link_enabled(device_t dev)
if (ntb->conn_type == NTB_CONN_TRANSPARENT)
return (true);
- cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
return ((cntl & NTB_CNTL_LINK_DISABLE) == 0);
}
@@ -2160,11 +2064,11 @@ recover_atom_link(void *arg)
if (atom_link_is_err(ntb))
goto retry;
- status32 = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ status32 = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
if ((status32 & ATOM_CNTL_LINK_DOWN) != 0)
goto out;
- status32 = ntb_reg_read(4, ntb->reg->lnk_sta);
+ status32 = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
width = NTB_LNK_STA_WIDTH(status32);
speed = status32 & NTB_LINK_SPEED_MASK;
@@ -2187,18 +2091,18 @@ retry:
* Polls the HW link status register(s); returns true if something has changed.
*/
static bool
-ntb_poll_link(struct ntb_softc *ntb)
+intel_ntb_poll_link(struct ntb_softc *ntb)
{
uint32_t ntb_cntl;
uint16_t reg_val;
if (ntb->type == NTB_ATOM) {
- ntb_cntl = ntb_reg_read(4, ntb->reg->ntb_ctl);
+ ntb_cntl = intel_ntb_reg_read(4, ntb->reg->ntb_ctl);
if (ntb_cntl == ntb->ntb_ctl)
return (false);
ntb->ntb_ctl = ntb_cntl;
- ntb->lnk_sta = ntb_reg_read(4, ntb->reg->lnk_sta);
+ ntb->lnk_sta = intel_ntb_reg_read(4, ntb->reg->lnk_sta);
} else {
db_iowrite_raw(ntb, ntb->self_reg->db_bell, ntb->db_link_mask);
@@ -2212,7 +2116,7 @@ ntb_poll_link(struct ntb_softc *ntb)
if (_xeon_link_is_up(ntb)) {
if (!ntb->peer_msix_good) {
callout_reset(&ntb->peer_msix_work, 0,
- ntb_exchange_msix, ntb);
+ intel_ntb_exchange_msix, ntb);
return (false);
}
} else {
@@ -2225,7 +2129,7 @@ ntb_poll_link(struct ntb_softc *ntb)
}
static inline enum ntb_speed
-ntb_link_sta_speed(struct ntb_softc *ntb)
+intel_ntb_link_sta_speed(struct ntb_softc *ntb)
{
if (!link_is_up(ntb))
@@ -2234,7 +2138,7 @@ ntb_link_sta_speed(struct ntb_softc *ntb)
}
static inline enum ntb_width
-ntb_link_sta_width(struct ntb_softc *ntb)
+intel_ntb_link_sta_width(struct ntb_softc *ntb)
{
if (!link_is_up(ntb))
@@ -2256,7 +2160,7 @@ SYSCTL_NODE(_hw_ntb, OID_AUTO, debug_info, CTLFLAG_RW, 0,
#define NTB_REGFLAGS_MASK (NTB_REGSZ_MASK | NTB_DB_READ | NTB_PCI_REG)
static void
-ntb_sysctl_init(struct ntb_softc *ntb)
+intel_ntb_sysctl_init(struct ntb_softc *ntb)
{
struct sysctl_oid_list *globals, *tree_par, *regpar, *statpar, *errpar;
struct sysctl_ctx_list *ctx;
@@ -2555,7 +2459,7 @@ sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
unsigned old, new;
int error;
- old = ntb_link_enabled(ntb->device);
+ old = intel_ntb_link_enabled(ntb->device);
error = SYSCTL_OUT(req, &old, sizeof(old));
if (error != 0 || req->newptr == NULL)
@@ -2565,13 +2469,13 @@ sysctl_handle_link_admin(SYSCTL_HANDLER_ARGS)
if (error != 0)
return (error);
- ntb_printf(0, "Admin set interface state to '%sabled'\n",
+ intel_ntb_printf(0, "Admin set interface state to '%sabled'\n",
(new != 0)? "en" : "dis");
if (new != 0)
- error = ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ error = intel_ntb_link_enable(ntb->device, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
else
- error = ntb_link_disable(ntb->device);
+ error = intel_ntb_link_disable(ntb->device);
return (error);
}
@@ -2586,7 +2490,7 @@ sysctl_handle_link_status_human(SYSCTL_HANDLER_ARGS)
sbuf_new_for_sysctl(&sb, NULL, 32, req);
- if (ntb_link_is_up(ntb->device, &speed, &width))
+ if (intel_ntb_link_is_up(ntb->device, &speed, &width))
sbuf_printf(&sb, "up / PCIe Gen %u / Width x%u",
(unsigned)speed, (unsigned)width);
else
@@ -2607,7 +2511,7 @@ sysctl_handle_link_status(SYSCTL_HANDLER_ARGS)
unsigned res;
int error;
- res = ntb_link_is_up(ntb->device, NULL, NULL);
+ res = intel_ntb_link_is_up(ntb->device, NULL, NULL);
error = SYSCTL_OUT(req, &res, sizeof(res));
if (error || !req->newptr)
@@ -2646,28 +2550,28 @@ sysctl_handle_register(SYSCTL_HANDLER_ARGS)
if (pci)
umv = pci_read_config(ntb->device, reg, 8);
else
- umv = ntb_reg_read(8, reg);
+ umv = intel_ntb_reg_read(8, reg);
outsz = sizeof(uint64_t);
break;
case NTB_REG_32:
if (pci)
umv = pci_read_config(ntb->device, reg, 4);
else
- umv = ntb_reg_read(4, reg);
+ umv = intel_ntb_reg_read(4, reg);
outsz = sizeof(uint32_t);
break;
case NTB_REG_16:
if (pci)
umv = pci_read_config(ntb->device, reg, 2);
else
- umv = ntb_reg_read(2, reg);
+ umv = intel_ntb_reg_read(2, reg);
outsz = sizeof(uint16_t);
break;
case NTB_REG_8:
if (pci)
umv = pci_read_config(ntb->device, reg, 1);
else
- umv = ntb_reg_read(1, reg);
+ umv = intel_ntb_reg_read(1, reg);
outsz = sizeof(uint8_t);
break;
default:
@@ -2687,7 +2591,7 @@ sysctl_handle_register(SYSCTL_HANDLER_ARGS)
}
static unsigned
-ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
+intel_ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
{
if ((ntb->b2b_mw_idx != B2B_MW_DISABLED && ntb->b2b_off == 0 &&
@@ -2701,8 +2605,21 @@ ntb_user_mw_to_idx(struct ntb_softc *ntb, unsigned uidx)
return (uidx);
}
+#ifndef EARLY_AP_STARTUP
+static int msix_ready;
+
static void
-ntb_exchange_msix(void *ctx)
+intel_ntb_msix_ready(void *arg __unused)
+{
+
+ msix_ready = 1;
+}
+SYSINIT(intel_ntb_msix_ready, SI_SUB_SMP, SI_ORDER_ANY,
+ intel_ntb_msix_ready, NULL);
+#endif
+
+static void
+intel_ntb_exchange_msix(void *ctx)
{
struct ntb_softc *ntb;
uint32_t val;
@@ -2715,41 +2632,49 @@ ntb_exchange_msix(void *ctx)
if (ntb->peer_msix_done)
goto msix_done;
+#ifndef EARLY_AP_STARTUP
+	/* Block MSIX negotiation until SMP has started and IRQs are reshuffled. */
+ if (!msix_ready)
+ goto reschedule;
+#endif
+
+ intel_ntb_get_msix_info(ntb);
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DATA0 + i,
ntb->msix_data[i].nmd_data);
- ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_OFS0 + i,
ntb->msix_data[i].nmd_ofs - ntb->msix_xlat);
}
- ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_GUARD, NTB_MSIX_VER_GUARD);
- ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_GUARD, &val);
if (val != NTB_MSIX_VER_GUARD)
goto reschedule;
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
- ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_DATA0 + i, &val);
+ intel_ntb_printf(2, "remote MSIX data(%u): 0x%x\n", i, val);
ntb->peer_msix_data[i].nmd_data = val;
- ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
- ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_OFS0 + i, &val);
+ intel_ntb_printf(2, "remote MSIX addr(%u): 0x%x\n", i, val);
ntb->peer_msix_data[i].nmd_ofs = val;
}
ntb->peer_msix_done = true;
msix_done:
- ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
- ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
+ intel_ntb_peer_spad_write(ntb->device, NTB_MSIX_DONE, NTB_MSIX_RECEIVED);
+ intel_ntb_spad_read(ntb->device, NTB_MSIX_DONE, &val);
if (val != NTB_MSIX_RECEIVED)
goto reschedule;
+ intel_ntb_spad_clear(ntb->device);
ntb->peer_msix_good = true;
/* Give peer time to see our NTB_MSIX_RECEIVED. */
goto reschedule;
msix_good:
- ntb_poll_link(ntb);
+ intel_ntb_poll_link(ntb);
ntb_link_event(ntb->device);
return;
@@ -2758,9 +2683,9 @@ reschedule:
if (_xeon_link_is_up(ntb)) {
callout_reset(&ntb->peer_msix_work,
hz * (ntb->peer_msix_good ? 2 : 1) / 100,
- ntb_exchange_msix, ntb);
+ intel_ntb_exchange_msix, ntb);
} else
- ntb_spad_clear(ntb->device);
+ intel_ntb_spad_clear(ntb->device);
}
/*
@@ -2768,7 +2693,7 @@ reschedule:
*/
static uint8_t
-ntb_spad_count(device_t dev)
+intel_ntb_spad_count(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -2776,7 +2701,7 @@ ntb_spad_count(device_t dev)
}
static uint8_t
-ntb_mw_count(device_t dev)
+intel_ntb_mw_count(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
uint8_t res;
@@ -2790,14 +2715,14 @@ ntb_mw_count(device_t dev)
}
static int
-ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
+intel_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
+ intel_ntb_reg_write(4, ntb->self_reg->spad + idx * 4, val);
return (0);
}
@@ -2806,30 +2731,30 @@ ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
* Zeros the local scratchpad.
*/
static void
-ntb_spad_clear(device_t dev)
+intel_ntb_spad_clear(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
unsigned i;
for (i = 0; i < ntb->spad_count; i++)
- ntb_spad_write(dev, i, 0);
+ intel_ntb_spad_write(dev, i, 0);
}
static int
-ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
+intel_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
struct ntb_softc *ntb = device_get_softc(dev);
if (idx >= ntb->spad_count)
return (EINVAL);
- *val = ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
+ *val = intel_ntb_reg_read(4, ntb->self_reg->spad + idx * 4);
return (0);
}
static int
-ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
+intel_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -2837,15 +2762,15 @@ ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
return (EINVAL);
if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
- ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
+ intel_ntb_mw_write(4, XEON_SPAD_OFFSET + idx * 4, val);
else
- ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
+ intel_ntb_reg_write(4, ntb->peer_reg->spad + idx * 4, val);
return (0);
}
static int
-ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
+intel_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -2853,15 +2778,15 @@ ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
return (EINVAL);
if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP))
- *val = ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
+ *val = intel_ntb_mw_read(4, XEON_SPAD_OFFSET + idx * 4);
else
- *val = ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
+ *val = intel_ntb_reg_read(4, ntb->peer_reg->spad + idx * 4);
return (0);
}
static int
-ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
+intel_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
bus_addr_t *plimit)
{
@@ -2871,11 +2796,11 @@ ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
size_t bar_b2b_off;
enum ntb_bar bar_num;
- if (mw_idx >= ntb_mw_count(dev))
+ if (mw_idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- mw_idx = ntb_user_mw_to_idx(ntb, mw_idx);
+ mw_idx = intel_ntb_user_mw_to_idx(ntb, mw_idx);
- bar_num = ntb_mw_to_bar(ntb, mw_idx);
+ bar_num = intel_ntb_mw_to_bar(ntb, mw_idx);
bar = &ntb->bar_info[bar_num];
bar_b2b_off = 0;
if (mw_idx == ntb->b2b_mw_idx) {
@@ -2905,7 +2830,7 @@ ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
}
static int
-ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
+intel_ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
{
struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
@@ -2914,11 +2839,11 @@ ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
uint32_t base_reg, xlat_reg, limit_reg;
enum ntb_bar bar_num;
- if (idx >= ntb_mw_count(dev))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
- bar_num = ntb_mw_to_bar(ntb, idx);
+ bar_num = intel_ntb_mw_to_bar(ntb, idx);
bar = &ntb->bar_info[bar_num];
bar_size = bar->size;
@@ -2938,25 +2863,25 @@ ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
limit = 0;
if (bar_is_64bit(ntb, bar_num)) {
- base = ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
+ base = intel_ntb_reg_read(8, base_reg) & BAR_HIGH_MASK;
if (limit_reg != 0 && size != mw_size)
limit = base + size;
/* Set and verify translation address */
- ntb_reg_write(8, xlat_reg, addr);
- reg_val = ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(8, xlat_reg, addr);
+ reg_val = intel_ntb_reg_read(8, xlat_reg) & BAR_HIGH_MASK;
if (reg_val != addr) {
- ntb_reg_write(8, xlat_reg, 0);
+ intel_ntb_reg_write(8, xlat_reg, 0);
return (EIO);
}
/* Set and verify the limit */
- ntb_reg_write(8, limit_reg, limit);
- reg_val = ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(8, limit_reg, limit);
+ reg_val = intel_ntb_reg_read(8, limit_reg) & BAR_HIGH_MASK;
if (reg_val != limit) {
- ntb_reg_write(8, limit_reg, base);
- ntb_reg_write(8, xlat_reg, 0);
+ intel_ntb_reg_write(8, limit_reg, base);
+ intel_ntb_reg_write(8, xlat_reg, 0);
return (EIO);
}
} else {
@@ -2967,25 +2892,25 @@ ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
if (((addr + size) & UINT32_MAX) != (addr + size))
return (ERANGE);
- base = ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
+ base = intel_ntb_reg_read(4, base_reg) & BAR_HIGH_MASK;
if (limit_reg != 0 && size != mw_size)
limit = base + size;
/* Set and verify translation address */
- ntb_reg_write(4, xlat_reg, addr);
- reg_val = ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(4, xlat_reg, addr);
+ reg_val = intel_ntb_reg_read(4, xlat_reg) & BAR_HIGH_MASK;
if (reg_val != addr) {
- ntb_reg_write(4, xlat_reg, 0);
+ intel_ntb_reg_write(4, xlat_reg, 0);
return (EIO);
}
/* Set and verify the limit */
- ntb_reg_write(4, limit_reg, limit);
- reg_val = ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
+ intel_ntb_reg_write(4, limit_reg, limit);
+ reg_val = intel_ntb_reg_read(4, limit_reg) & BAR_HIGH_MASK;
if (reg_val != limit) {
- ntb_reg_write(4, limit_reg, base);
- ntb_reg_write(4, xlat_reg, 0);
+ intel_ntb_reg_write(4, limit_reg, base);
+ intel_ntb_reg_write(4, xlat_reg, 0);
return (EIO);
}
}
@@ -2993,46 +2918,46 @@ ntb_mw_set_trans(device_t dev, unsigned idx, bus_addr_t addr, size_t size)
}
static int
-ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
+intel_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
{
- return (ntb_mw_set_trans(dev, mw_idx, 0, 0));
+ return (intel_ntb_mw_set_trans(dev, mw_idx, 0, 0));
}
static int
-ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
+intel_ntb_mw_get_wc(device_t dev, unsigned idx, vm_memattr_t *mode)
{
struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
- if (idx >= ntb_mw_count(dev))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
*mode = bar->map_mode;
return (0);
}
static int
-ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
+intel_ntb_mw_set_wc(device_t dev, unsigned idx, vm_memattr_t mode)
{
struct ntb_softc *ntb = device_get_softc(dev);
- if (idx >= ntb_mw_count(dev))
+ if (idx >= intel_ntb_mw_count(dev))
return (EINVAL);
- idx = ntb_user_mw_to_idx(ntb, idx);
- return (ntb_mw_set_wc_internal(ntb, idx, mode));
+ idx = intel_ntb_user_mw_to_idx(ntb, idx);
+ return (intel_ntb_mw_set_wc_internal(ntb, idx, mode));
}
static int
-ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
+intel_ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
{
struct ntb_pci_bar_info *bar;
int rc;
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, idx)];
if (bar->map_mode == mode)
return (0);
@@ -3044,7 +2969,7 @@ ntb_mw_set_wc_internal(struct ntb_softc *ntb, unsigned idx, vm_memattr_t mode)
}
static void
-ntb_peer_db_set(device_t dev, uint64_t bit)
+intel_ntb_peer_db_set(device_t dev, uint64_t bit)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -3055,7 +2980,7 @@ ntb_peer_db_set(device_t dev, uint64_t bit)
lapic = ntb->peer_lapic_bar;
for (i = 0; i < XEON_NONLINK_DB_MSIX_BITS; i++) {
- if ((bit & ntb_db_vector_mask(dev, i)) != 0)
+ if ((bit & intel_ntb_db_vector_mask(dev, i)) != 0)
bus_space_write_4(lapic->pci_bus_tag,
lapic->pci_bus_handle,
ntb->peer_msix_data[i].nmd_ofs,
@@ -3065,7 +2990,7 @@ ntb_peer_db_set(device_t dev, uint64_t bit)
}
if (HAS_FEATURE(ntb, NTB_SDOORBELL_LOCKUP)) {
- ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
+ intel_ntb_mw_write(2, XEON_PDOORBELL_OFFSET, bit);
return;
}
@@ -3073,7 +2998,7 @@ ntb_peer_db_set(device_t dev, uint64_t bit)
}
static int
-ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
+intel_ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
{
struct ntb_softc *ntb = device_get_softc(dev);
struct ntb_pci_bar_info *bar;
@@ -3088,7 +3013,7 @@ ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
KASSERT(ntb->b2b_mw_idx != B2B_MW_DISABLED,
("invalid b2b idx"));
- bar = &ntb->bar_info[ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
+ bar = &ntb->bar_info[intel_ntb_mw_to_bar(ntb, ntb->b2b_mw_idx)];
regoff = XEON_PDOORBELL_OFFSET;
}
KASSERT(bar->pci_bus_tag != X86_BUS_SPACE_IO, ("uh oh"));
@@ -3100,7 +3025,7 @@ ntb_peer_db_addr(device_t dev, bus_addr_t *db_addr, vm_size_t *db_size)
}
static uint64_t
-ntb_db_valid_mask(device_t dev)
+intel_ntb_db_valid_mask(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -3108,7 +3033,7 @@ ntb_db_valid_mask(device_t dev)
}
static int
-ntb_db_vector_count(device_t dev)
+intel_ntb_db_vector_count(device_t dev)
{
struct ntb_softc *ntb = device_get_softc(dev);
@@ -3116,24 +3041,24 @@ ntb_db_vector_count(device_t dev)
}
static uint64_t
-ntb_db_vector_mask(device_t dev, uint32_t vector)
+intel_ntb_db_vector_mask(device_t dev, uint32_t vector)
{
struct ntb_softc *ntb = device_get_softc(dev);
if (vector > ntb->db_vec_count)
return (0);
- return (ntb->db_valid_mask & ntb_vec_mask(ntb, vector));
+ return (ntb->db_valid_mask & intel_ntb_vec_mask(ntb, vector));
}
static bool
-ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
+intel_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
{
struct ntb_softc *ntb = device_get_softc(dev);
if (speed != NULL)
- *speed = ntb_link_sta_speed(ntb);
+ *speed = intel_ntb_link_sta_speed(ntb);
if (width != NULL)
- *width = ntb_link_sta_width(ntb);
+ *width = intel_ntb_link_sta_width(ntb);
return (link_is_up(ntb));
}
@@ -3150,38 +3075,35 @@ save_bar_parameters(struct ntb_pci_bar_info *bar)
static device_method_t ntb_intel_methods[] = {
/* Device interface */
- DEVMETHOD(device_probe, ntb_probe),
- DEVMETHOD(device_attach, ntb_attach),
- DEVMETHOD(device_detach, ntb_detach),
+ DEVMETHOD(device_probe, intel_ntb_probe),
+ DEVMETHOD(device_attach, intel_ntb_attach),
+ DEVMETHOD(device_detach, intel_ntb_detach),
/* NTB interface */
- DEVMETHOD(ntb_link_is_up, ntb_link_is_up),
- DEVMETHOD(ntb_link_enable, ntb_link_enable),
- DEVMETHOD(ntb_link_disable, ntb_link_disable),
- DEVMETHOD(ntb_link_enabled, ntb_link_enabled),
- DEVMETHOD(ntb_set_ctx, ntb_set_ctx),
- DEVMETHOD(ntb_get_ctx, ntb_get_ctx),
- DEVMETHOD(ntb_clear_ctx, ntb_clear_ctx),
- DEVMETHOD(ntb_mw_count, ntb_mw_count),
- DEVMETHOD(ntb_mw_get_range, ntb_mw_get_range),
- DEVMETHOD(ntb_mw_set_trans, ntb_mw_set_trans),
- DEVMETHOD(ntb_mw_clear_trans, ntb_mw_clear_trans),
- DEVMETHOD(ntb_mw_get_wc, ntb_mw_get_wc),
- DEVMETHOD(ntb_mw_set_wc, ntb_mw_set_wc),
- DEVMETHOD(ntb_spad_count, ntb_spad_count),
- DEVMETHOD(ntb_spad_clear, ntb_spad_clear),
- DEVMETHOD(ntb_spad_write, ntb_spad_write),
- DEVMETHOD(ntb_spad_read, ntb_spad_read),
- DEVMETHOD(ntb_peer_spad_write, ntb_peer_spad_write),
- DEVMETHOD(ntb_peer_spad_read, ntb_peer_spad_read),
- DEVMETHOD(ntb_db_valid_mask, ntb_db_valid_mask),
- DEVMETHOD(ntb_db_vector_count, ntb_db_vector_count),
- DEVMETHOD(ntb_db_vector_mask, ntb_db_vector_mask),
- DEVMETHOD(ntb_db_clear, ntb_db_clear),
- DEVMETHOD(ntb_db_clear_mask, ntb_db_clear_mask),
- DEVMETHOD(ntb_db_read, ntb_db_read),
- DEVMETHOD(ntb_db_set_mask, ntb_db_set_mask),
- DEVMETHOD(ntb_peer_db_addr, ntb_peer_db_addr),
- DEVMETHOD(ntb_peer_db_set, ntb_peer_db_set),
+ DEVMETHOD(ntb_link_is_up, intel_ntb_link_is_up),
+ DEVMETHOD(ntb_link_enable, intel_ntb_link_enable),
+ DEVMETHOD(ntb_link_disable, intel_ntb_link_disable),
+ DEVMETHOD(ntb_link_enabled, intel_ntb_link_enabled),
+ DEVMETHOD(ntb_mw_count, intel_ntb_mw_count),
+ DEVMETHOD(ntb_mw_get_range, intel_ntb_mw_get_range),
+ DEVMETHOD(ntb_mw_set_trans, intel_ntb_mw_set_trans),
+ DEVMETHOD(ntb_mw_clear_trans, intel_ntb_mw_clear_trans),
+ DEVMETHOD(ntb_mw_get_wc, intel_ntb_mw_get_wc),
+ DEVMETHOD(ntb_mw_set_wc, intel_ntb_mw_set_wc),
+ DEVMETHOD(ntb_spad_count, intel_ntb_spad_count),
+ DEVMETHOD(ntb_spad_clear, intel_ntb_spad_clear),
+ DEVMETHOD(ntb_spad_write, intel_ntb_spad_write),
+ DEVMETHOD(ntb_spad_read, intel_ntb_spad_read),
+ DEVMETHOD(ntb_peer_spad_write, intel_ntb_peer_spad_write),
+ DEVMETHOD(ntb_peer_spad_read, intel_ntb_peer_spad_read),
+ DEVMETHOD(ntb_db_valid_mask, intel_ntb_db_valid_mask),
+ DEVMETHOD(ntb_db_vector_count, intel_ntb_db_vector_count),
+ DEVMETHOD(ntb_db_vector_mask, intel_ntb_db_vector_mask),
+ DEVMETHOD(ntb_db_clear, intel_ntb_db_clear),
+ DEVMETHOD(ntb_db_clear_mask, intel_ntb_db_clear_mask),
+ DEVMETHOD(ntb_db_read, intel_ntb_db_read),
+ DEVMETHOD(ntb_db_set_mask, intel_ntb_db_set_mask),
+ DEVMETHOD(ntb_peer_db_addr, intel_ntb_peer_db_addr),
+ DEVMETHOD(ntb_peer_db_set, intel_ntb_peer_db_set),
DEVMETHOD_END
};
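With the hardware methods renamed to intel_ntb_*, clients are expected to reach the driver only through the generic ntb_* calls bound to the DEVMETHOD table above. A minimal sketch of such a client, assuming the thin ntb_* helpers dispatch to the parent device's method table (the child driver itself is hypothetical):

static int
example_ntb_child_attach(device_t dev)
{
	uint64_t dbmask;

	device_printf(dev, "%d memory windows, %d scratchpads\n",
	    ntb_mw_count(dev), ntb_spad_count(dev));

	/* Mask all doorbells until a callback context is registered. */
	dbmask = ntb_db_valid_mask(dev);
	ntb_db_set_mask(dev, dbmask);
	return (0);
}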
diff --git a/sys/dev/ntb/ntb_if.m b/sys/dev/ntb/ntb_if.m
index bb3b7bd1077b..d8ca22755839 100644
--- a/sys/dev/ntb/ntb_if.m
+++ b/sys/dev/ntb/ntb_if.m
@@ -60,133 +60,45 @@ HEADER {
};
};
-#
-# ntb_link_is_up() - get the current ntb link state
-# @ntb: NTB device context
-# @speed: OUT - The link speed expressed as PCIe generation number
-# @width: OUT - The link width expressed as the number of PCIe lanes
-#
-# RETURNS: true or false based on the hardware link state
-#
METHOD bool link_is_up {
device_t ntb;
enum ntb_speed *speed;
enum ntb_width *width;
};
-#
-# ntb_link_enable() - enable the link on the secondary side of the ntb
-# @ntb: NTB device context
-# @max_speed: The maximum link speed expressed as PCIe generation number[0]
-# @max_width: The maximum link width expressed as the number of PCIe lanes[0]
-#
-# Enable the link on the secondary side of the ntb. This can only be done
-# from the primary side of the ntb in primary or b2b topology. The ntb device
-# should train the link to its maximum speed and width, or the requested speed
-# and width, whichever is smaller, if supported.
-#
-# Return: Zero on success, otherwise an error number.
-#
-# [0]: Only NTB_SPEED_AUTO and NTB_WIDTH_AUTO are valid inputs; other speed
-# and width input will be ignored.
-#/
METHOD int link_enable {
device_t ntb;
enum ntb_speed speed;
enum ntb_width width;
};
-#
-# ntb_link_disable() - disable the link on the secondary side of the ntb
-# @ntb: NTB device context
-#
-# Disable the link on the secondary side of the ntb. This can only be done
-# from the primary side of the ntb in primary or b2b topology. The ntb device
-# should disable the link. Returning from this call must indicate that a
-# barrier has passed, though with no more writes may pass in either direction
-# across the link, except if this call returns an error number.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD int link_disable {
device_t ntb;
};
-#
-# get enable status of the link on the secondary side of the ntb
-#
METHOD bool link_enabled {
device_t ntb;
};
-#
-# ntb_set_ctx() - associate a driver context with an ntb device
-# @ntb: NTB device context
-# @ctx: Driver context
-# @ctx_ops: Driver context operations
-#
-# Associate a driver context and operations with a ntb device. The context is
-# provided by the client driver, and the driver may associate a different
-# context with each ntb device.
-#
-# Return: Zero if the context is associated, otherwise an error number.
-#
METHOD int set_ctx {
device_t ntb;
void *ctx;
const struct ntb_ctx_ops *ctx_ops;
};
-#
-# ntb_set_ctx() - get a driver context associated with an ntb device
-# @ntb: NTB device context
-# @ctx_ops: Driver context operations
-#
-# Get a driver context and operations associated with a ntb device.
-#
METHOD void * get_ctx {
device_t ntb;
const struct ntb_ctx_ops **ctx_ops;
};
-#
-# ntb_clear_ctx() - disassociate any driver context from an ntb device
-# @ntb: NTB device context
-#
-# Clear any association that may exist between a driver context and the ntb
-# device.
-#
METHOD void clear_ctx {
device_t ntb;
};
-#
-# ntb_mw_count() - Get the number of memory windows available for KPI
-# consumers.
-#
-# (Excludes any MW wholly reserved for register access.)
-#
METHOD uint8_t mw_count {
device_t ntb;
};
-#
-# ntb_mw_get_range() - get the range of a memory window
-# @ntb: NTB device context
-# @idx: Memory window number
-# @base: OUT - the base address for mapping the memory window
-# @size: OUT - the size for mapping the memory window
-# @align: OUT - the base alignment for translating the memory window
-# @align_size: OUT - the size alignment for translating the memory window
-#
-# Get the range of a memory window. NULL may be given for any output
-# parameter if the value is not needed. The base and size may be used for
-# mapping the memory window, to access the peer memory. The alignment and
-# size may be used for translating the memory window, for the peer to access
-# memory on the local system.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD int mw_get_range {
device_t ntb;
unsigned mw_idx;
@@ -198,22 +110,6 @@ METHOD int mw_get_range {
bus_addr_t *plimit;
};
-#
-# ntb_mw_set_trans() - set the translation of a memory window
-# @ntb: NTB device context
-# @idx: Memory window number
-# @addr: The dma address local memory to expose to the peer
-# @size: The size of the local memory to expose to the peer
-#
-# Set the translation of a memory window. The peer may access local memory
-# through the window starting at the address, up to the size. The address
-# must be aligned to the alignment specified by ntb_mw_get_range(). The size
-# must be aligned to the size alignment specified by ntb_mw_get_range(). The
-# address must be below the plimit specified by ntb_mw_get_range() (i.e. for
-# 32-bit BARs).
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD int mw_set_trans {
device_t ntb;
unsigned mw_idx;
@@ -221,277 +117,94 @@ METHOD int mw_set_trans {
size_t size;
};
-#
-# ntb_mw_clear_trans() - clear the translation of a memory window
-# @ntb: NTB device context
-# @idx: Memory window number
-#
-# Clear the translation of a memory window. The peer may no longer access
-# local memory through the window.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD int mw_clear_trans {
device_t ntb;
unsigned mw_idx;
};
-#
-# ntb_mw_get_wc - Get the write-combine status of a memory window
-#
-# Returns: Zero on success, setting *wc; otherwise an error number (e.g. if
-# idx is an invalid memory window).
-#
-# Mode is a VM_MEMATTR_* type.
-#
METHOD int mw_get_wc {
device_t ntb;
unsigned mw_idx;
vm_memattr_t *mode;
};
-#
-# ntb_mw_set_wc - Set the write-combine status of a memory window
-#
-# If 'mode' matches the current status, this does nothing and succeeds. Mode
-# is a VM_MEMATTR_* type.
-#
-# Returns: Zero on success, setting the caching attribute on the virtual
-# mapping of the BAR; otherwise an error number (e.g. if idx is an invalid
-# memory window, or if changing the caching attribute fails).
-#
METHOD int mw_set_wc {
device_t ntb;
unsigned mw_idx;
vm_memattr_t mode;
};
-#
-# ntb_spad_count() - get the total scratch regs usable
-# @ntb: pointer to ntb_softc instance
-#
-# This function returns the max 32bit scratchpad registers usable by the
-# upper layer.
-#
-# RETURNS: total number of scratch pad registers available
-#
METHOD uint8_t spad_count {
device_t ntb;
};
-#
-# ntb_get_max_spads() - zero local scratch registers
-# @ntb: pointer to ntb_softc instance
-#
-# This functions overwrites all local scratchpad registers with zeroes.
-#
METHOD void spad_clear {
device_t ntb;
};
-#
-# ntb_spad_write() - write to the secondary scratchpad register
-# @ntb: pointer to ntb_softc instance
-# @idx: index to the scratchpad register, 0 based
-# @val: the data value to put into the register
-#
-# This function allows writing of a 32bit value to the indexed scratchpad
-# register. The register resides on the secondary (external) side.
-#
-# RETURNS: An appropriate ERRNO error value on error, or zero for success.
-#
METHOD int spad_write {
device_t ntb;
unsigned int idx;
uint32_t val;
};
-#
-# ntb_spad_read() - read from the primary scratchpad register
-# @ntb: pointer to ntb_softc instance
-# @idx: index to scratchpad register, 0 based
-# @val: pointer to 32bit integer for storing the register value
-#
-# This function allows reading of the 32bit scratchpad register on
-# the primary (internal) side.
-#
-# RETURNS: An appropriate ERRNO error value on error, or zero for success.
-#
METHOD int spad_read {
device_t ntb;
unsigned int idx;
uint32_t *val;
};
-#
-# ntb_peer_spad_write() - write to the secondary scratchpad register
-# @ntb: pointer to ntb_softc instance
-# @idx: index to the scratchpad register, 0 based
-# @val: the data value to put into the register
-#
-# This function allows writing of a 32bit value to the indexed scratchpad
-# register. The register resides on the secondary (external) side.
-#
-# RETURNS: An appropriate ERRNO error value on error, or zero for success.
-#
METHOD int peer_spad_write {
device_t ntb;
unsigned int idx;
uint32_t val;
};
-#
-# ntb_peer_spad_read() - read from the primary scratchpad register
-# @ntb: pointer to ntb_softc instance
-# @idx: index to scratchpad register, 0 based
-# @val: pointer to 32bit integer for storing the register value
-#
-# This function allows reading of the 32bit scratchpad register on
-# the primary (internal) side.
-#
-# RETURNS: An appropriate ERRNO error value on error, or zero for success.
-#
METHOD int peer_spad_read {
device_t ntb;
unsigned int idx;
uint32_t *val;
};
-#
-# ntb_db_valid_mask() - get a mask of doorbell bits supported by the ntb
-# @ntb: NTB device context
-#
-# Hardware may support different number or arrangement of doorbell bits.
-#
-# Return: A mask of doorbell bits supported by the ntb.
-#
METHOD uint64_t db_valid_mask {
device_t ntb;
};
-#
-# ntb_db_vector_count() - get the number of doorbell interrupt vectors
-# @ntb: NTB device context.
-#
-# Hardware may support different number of interrupt vectors.
-#
-# Return: The number of doorbell interrupt vectors.
-#
METHOD int db_vector_count {
device_t ntb;
};
-#
-# ntb_db_vector_mask() - get a mask of doorbell bits serviced by a vector
-# @ntb: NTB device context
-# @vector: Doorbell vector number
-#
-# Each interrupt vector may have a different number or arrangement of bits.
-#
-# Return: A mask of doorbell bits serviced by a vector.
-#
METHOD uint64_t db_vector_mask {
device_t ntb;
uint32_t vector;
};
-#
-# ntb_peer_db_addr() - address and size of the peer doorbell register
-# @ntb: NTB device context.
-# @db_addr: OUT - The address of the peer doorbell register.
-# @db_size: OUT - The number of bytes to write the peer doorbell register.
-#
-# Return the address of the peer doorbell register. This may be used, for
-# example, by drivers that offload memory copy operations to a dma engine.
-# The drivers may wish to ring the peer doorbell at the completion of memory
-# copy operations. For efficiency, and to simplify ordering of operations
-# between the dma memory copies and the ringing doorbell, the driver may
-# append one additional dma memory copy with the doorbell register as the
-# destination, after the memory copy operations.
-#
-# Return: Zero on success, otherwise an error number.
-#
-# Note that writing the peer doorbell via a memory window will *not* generate
-# an interrupt on the remote host; that must be done separately.
-#
METHOD int peer_db_addr {
device_t ntb;
bus_addr_t *db_addr;
vm_size_t *db_size;
};
-#
-# ntb_db_clear() - clear bits in the local doorbell register
-# @ntb: NTB device context.
-# @db_bits: Doorbell bits to clear.
-#
-# Clear bits in the local doorbell register, arming the bits for the next
-# doorbell.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD void db_clear {
device_t ntb;
uint64_t bits;
};
-#
-# ntb_db_clear_mask() - clear bits in the local doorbell mask
-# @ntb: NTB device context.
-# @db_bits: Doorbell bits to clear.
-#
-# Clear bits in the local doorbell mask register, allowing doorbell interrupts
-# from being generated for those doorbell bits. If a doorbell bit is already
-# set at the time the mask is cleared, and the corresponding mask bit is
-# changed from set to clear, then the ntb driver must ensure that
-# ntb_db_event() is called. If the hardware does not generate the interrupt
-# on clearing the mask bit, then the driver must call ntb_db_event() anyway.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD void db_clear_mask {
device_t ntb;
uint64_t bits;
};
-#
-# ntb_db_read() - read the local doorbell register
-# @ntb: NTB device context.
-#
-# Read the local doorbell register, and return the bits that are set.
-#
-# Return: The bits currently set in the local doorbell register.
-#
METHOD uint64_t db_read {
device_t ntb;
};
-#
-# ntb_db_set_mask() - set bits in the local doorbell mask
-# @ntb: NTB device context.
-# @db_bits: Doorbell mask bits to set.
-#
-# Set bits in the local doorbell mask register, preventing doorbell interrupts
-# from being generated for those doorbell bits. Bits that were already set
-# must remain set.
-#
-# Return: Zero on success, otherwise an error number.
-#
METHOD void db_set_mask {
device_t ntb;
uint64_t bits;
};
-#
-# ntb_peer_db_set() - Set the doorbell on the secondary/external side
-# @ntb: pointer to ntb_softc instance
-# @bit: doorbell bits to ring
-#
-# This function allows triggering of a doorbell on the secondary/external
-# side that will initiate an interrupt on the remote host
-#
METHOD void peer_db_set {
device_t ntb;
uint64_t bits;
};
-
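Each METHOD above is expanded by the kobj machinery into an NTB_*() dispatch macro; a one-line wrapper per method is assumed to forward a child's call to its parent NTB device, along these lines (a sketch, not the literal wrapper source):

uint64_t
ntb_db_read(device_t ntb)
{

	return (NTB_DB_READ(device_get_parent(ntb)));
}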
diff --git a/sys/dev/ntb/ntb_transport.c b/sys/dev/ntb/ntb_transport.c
index 6c1c5519fc28..c7bc4da2c388 100644
--- a/sys/dev/ntb/ntb_transport.c
+++ b/sys/dev/ntb/ntb_transport.c
@@ -43,7 +43,6 @@ __FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
-#include <sys/bitset.h>
#include <sys/bus.h>
#include <sys/ktr.h>
#include <sys/limits.h>
@@ -64,13 +63,6 @@ __FBSDID("$FreeBSD$");
#include "ntb.h"
#include "ntb_transport.h"
-#define QP_SETSIZE 64
-BITSET_DEFINE(_qpset, QP_SETSIZE);
-#define test_bit(pos, addr) BIT_ISSET(QP_SETSIZE, (pos), (addr))
-#define set_bit(pos, addr) BIT_SET(QP_SETSIZE, (pos), (addr))
-#define clear_bit(pos, addr) BIT_CLR(QP_SETSIZE, (pos), (addr))
-#define ffs_bit(addr) BIT_FFS(QP_SETSIZE, (addr))
-
#define KTR_NTB KTR_SPARE3
#define NTB_TRANSPORT_VERSION 4
@@ -94,12 +86,6 @@ SYSCTL_UQUAD(_hw_ntb_transport, OID_AUTO, max_mw_size, CTLFLAG_RDTUN, &max_mw_si
"If enabled (non-zero), limit the size of large memory windows. "
"Both sides of the NTB MUST set the same value here.");
-static unsigned max_num_clients;
-SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, max_num_clients, CTLFLAG_RDTUN,
- &max_num_clients, 0, "Maximum number of NTB transport clients. "
- "0 (default) - use all available NTB memory windows; "
- "positive integer N - Limit to N memory windows.");
-
static unsigned enable_xeon_watchdog;
SYSCTL_UINT(_hw_ntb_transport, OID_AUTO, enable_xeon_watchdog, CTLFLAG_RDTUN,
&enable_xeon_watchdog, 0, "If non-zero, write a register every second to "
@@ -130,7 +116,7 @@ struct ntb_rx_info {
struct ntb_transport_qp {
struct ntb_transport_ctx *transport;
- device_t ntb;
+ device_t dev;
void *cb_data;
@@ -200,15 +186,21 @@ struct ntb_transport_mw {
bus_addr_t dma_addr;
};
+struct ntb_transport_child {
+ device_t dev;
+ int qpoff;
+ int qpcnt;
+ struct ntb_transport_child *next;
+};
+
struct ntb_transport_ctx {
device_t dev;
- device_t ntb;
+ struct ntb_transport_child *child;
struct ntb_transport_mw *mw_vec;
struct ntb_transport_qp *qp_vec;
- struct _qpset qp_bitmap;
- struct _qpset qp_bitmap_free;
unsigned mw_count;
unsigned qp_count;
+ uint64_t qp_bitmap;
volatile bool link_is_up;
struct callout link_work;
struct callout link_watchdog;
@@ -243,7 +235,6 @@ enum {
NTBT_MW0_SZ_LOW,
NTBT_MW1_SZ_HIGH,
NTBT_MW1_SZ_LOW,
- NTBT_MAX_SPAD,
/*
* Some NTB-using hardware have a watchdog to work around NTB hangs; if
@@ -317,7 +308,7 @@ xeon_link_watchdog_hb(void *arg)
struct ntb_transport_ctx *nt;
nt = arg;
- NTB_SPAD_WRITE(nt->ntb, NTBT_WATCHDOG_SPAD, 0);
+ ntb_spad_write(nt->dev, NTBT_WATCHDOG_SPAD, 0);
callout_reset(&nt->link_watchdog, 1 * hz, xeon_link_watchdog_hb, nt);
}
@@ -333,21 +324,50 @@ static int
ntb_transport_attach(device_t dev)
{
struct ntb_transport_ctx *nt = device_get_softc(dev);
- device_t ntb = device_get_parent(dev);
+ struct ntb_transport_child **cpp = &nt->child;
+ struct ntb_transport_child *nc;
struct ntb_transport_mw *mw;
- uint64_t qp_bitmap;
- int rc;
- unsigned i;
+ uint64_t db_bitmap;
+ int rc, i, db_count, spad_count, qp, qpu, qpo, qpt;
+ char cfg[128] = "";
+ char buf[32];
+ char *n, *np, *c, *name;
nt->dev = dev;
- nt->ntb = ntb;
- nt->mw_count = NTB_MW_COUNT(ntb);
+ nt->mw_count = ntb_mw_count(dev);
+ spad_count = ntb_spad_count(dev);
+ db_bitmap = ntb_db_valid_mask(dev);
+ db_count = flsll(db_bitmap);
+ KASSERT(db_bitmap == (1 << db_count) - 1,
+ ("Doorbells are not sequential (%jx).\n", db_bitmap));
+
+ device_printf(dev, "%d memory windows, %d scratchpads, "
+ "%d doorbells\n", nt->mw_count, spad_count, db_count);
+
+ if (nt->mw_count == 0) {
+ device_printf(dev, "At least 1 memory window required.\n");
+ return (ENXIO);
+ }
+ if (spad_count < 6) {
+ device_printf(dev, "At least 6 scratchpads required.\n");
+ return (ENXIO);
+ }
+ if (spad_count < 4 + 2 * nt->mw_count) {
+ nt->mw_count = (spad_count - 4) / 2;
+ device_printf(dev, "Scratchpads enough only for %d "
+ "memory windows.\n", nt->mw_count);
+ }
+ if (db_bitmap == 0) {
+ device_printf(dev, "At least one doorbell required.\n");
+ return (ENXIO);
+ }
+
nt->mw_vec = malloc(nt->mw_count * sizeof(*nt->mw_vec), M_NTB_T,
M_WAITOK | M_ZERO);
for (i = 0; i < nt->mw_count; i++) {
mw = &nt->mw_vec[i];
- rc = NTB_MW_GET_RANGE(ntb, i, &mw->phys_addr, &mw->vbase,
+ rc = ntb_mw_get_range(dev, i, &mw->phys_addr, &mw->vbase,
&mw->phys_size, &mw->xlat_align, &mw->xlat_align_size,
&mw->addr_limit);
if (rc != 0)
@@ -358,49 +378,80 @@ ntb_transport_attach(device_t dev)
mw->virt_addr = NULL;
mw->dma_addr = 0;
- rc = NTB_MW_SET_WC(nt->ntb, i, VM_MEMATTR_WRITE_COMBINING);
+ rc = ntb_mw_set_wc(dev, i, VM_MEMATTR_WRITE_COMBINING);
if (rc)
ntb_printf(0, "Unable to set mw%d caching\n", i);
}
- qp_bitmap = NTB_DB_VALID_MASK(ntb);
- nt->qp_count = flsll(qp_bitmap);
- KASSERT(nt->qp_count != 0, ("bogus db bitmap"));
- nt->qp_count -= 1;
+ qpu = 0;
+ qpo = imin(db_count, nt->mw_count);
+ qpt = db_count;
+
+ snprintf(buf, sizeof(buf), "hint.%s.%d.config", device_get_name(dev),
+ device_get_unit(dev));
+ TUNABLE_STR_FETCH(buf, cfg, sizeof(cfg));
+ n = cfg;
+ i = 0;
+ while ((c = strsep(&n, ",")) != NULL) {
+ np = c;
+ name = strsep(&np, ":");
+ if (name != NULL && name[0] == 0)
+ name = NULL;
+ qp = (np && np[0] != 0) ? strtol(np, NULL, 10) : qpo - qpu;
+ if (qp <= 0)
+ qp = 1;
+
+ if (qp > qpt - qpu) {
+ device_printf(dev, "Not enough resources for config\n");
+ break;
+ }
+
+ nc = malloc(sizeof(*nc), M_DEVBUF, M_WAITOK | M_ZERO);
+ nc->qpoff = qpu;
+ nc->qpcnt = qp;
+ nc->dev = device_add_child(dev, name, -1);
+ if (nc->dev == NULL) {
+ device_printf(dev, "Can not add child.\n");
+ break;
+ }
+ device_set_ivars(nc->dev, nc);
+ *cpp = nc;
+ cpp = &nc->next;
+
+ if (bootverbose) {
+ device_printf(dev, "%d \"%s\": queues %d",
+ i, name, qpu);
+ if (qp > 1)
+ printf("-%d", qpu + qp - 1);
+ printf("\n");
+ }
- if (max_num_clients != 0 && max_num_clients < nt->qp_count)
- nt->qp_count = max_num_clients;
- else if (nt->mw_count < nt->qp_count)
- nt->qp_count = nt->mw_count;
- KASSERT(nt->qp_count <= QP_SETSIZE, ("invalid qp_count"));
+ qpu += qp;
+ i++;
+ }
+ nt->qp_count = qpu;
nt->qp_vec = malloc(nt->qp_count * sizeof(*nt->qp_vec), M_NTB_T,
M_WAITOK | M_ZERO);
- for (i = 0; i < nt->qp_count; i++) {
- set_bit(i, &nt->qp_bitmap);
- set_bit(i, &nt->qp_bitmap_free);
+ for (i = 0; i < nt->qp_count; i++)
ntb_transport_init_queue(nt, i);
- }
callout_init(&nt->link_work, 0);
callout_init(&nt->link_watchdog, 0);
TASK_INIT(&nt->link_cleanup, 0, ntb_transport_link_cleanup_work, nt);
- rc = NTB_SET_CTX(ntb, nt, &ntb_transport_ops);
+ rc = ntb_set_ctx(dev, nt, &ntb_transport_ops);
if (rc != 0)
goto err;
nt->link_is_up = false;
- NTB_LINK_ENABLE(ntb, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
+ ntb_link_enable(dev, NTB_SPEED_AUTO, NTB_WIDTH_AUTO);
if (enable_xeon_watchdog != 0)
callout_reset(&nt->link_watchdog, 0, xeon_link_watchdog_hb, nt);
- /* Attach children to this transport */
- device_add_child(dev, NULL, -1);
bus_generic_attach(dev);
-
return (0);
err:
@@ -413,28 +464,27 @@ static int
ntb_transport_detach(device_t dev)
{
struct ntb_transport_ctx *nt = device_get_softc(dev);
- device_t ntb = nt->ntb;
- struct _qpset qp_bitmap_alloc;
- uint8_t i;
-
- /* Detach & delete all children */
- device_delete_children(dev);
+ struct ntb_transport_child **cpp = &nt->child;
+ struct ntb_transport_child *nc;
+ int error = 0, i;
+
+ while ((nc = *cpp) != NULL) {
+ *cpp = (*cpp)->next;
+ error = device_delete_child(dev, nc->dev);
+ if (error)
+ break;
+ free(nc, M_DEVBUF);
+ }
+ KASSERT(nt->qp_bitmap == 0,
+ ("Some queues not freed on detach (%jx)", nt->qp_bitmap));
ntb_transport_link_cleanup(nt);
taskqueue_drain(taskqueue_swi, &nt->link_cleanup);
callout_drain(&nt->link_work);
callout_drain(&nt->link_watchdog);
- BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc);
- BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free);
-
- /* Verify that all the QPs are freed */
- for (i = 0; i < nt->qp_count; i++)
- if (test_bit(i, &qp_bitmap_alloc))
- ntb_transport_free_queue(&nt->qp_vec[i]);
-
- NTB_LINK_DISABLE(ntb);
- NTB_CLEAR_CTX(ntb);
+ ntb_link_disable(dev);
+ ntb_clear_ctx(dev);
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
@@ -444,6 +494,14 @@ ntb_transport_detach(device_t dev)
return (0);
}
+int
+ntb_transport_queue_count(device_t dev)
+{
+ struct ntb_transport_child *nc = device_get_ivars(dev);
+
+ return (nc->qpcnt);
+}
+
static void
ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
{
@@ -461,7 +519,7 @@ ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
qp = &nt->qp_vec[qp_num];
qp->qp_num = qp_num;
qp->transport = nt;
- qp->ntb = nt->ntb;
+ qp->dev = nt->dev;
qp->client_ready = false;
qp->event_handler = NULL;
ntb_qp_link_down_reset(qp);
@@ -511,14 +569,12 @@ ntb_transport_init_queue(struct ntb_transport_ctx *nt, unsigned int qp_num)
void
ntb_transport_free_queue(struct ntb_transport_qp *qp)
{
+ struct ntb_transport_ctx *nt = qp->transport;
struct ntb_queue_entry *entry;
- if (qp == NULL)
- return;
-
callout_drain(&qp->link_work);
- NTB_DB_SET_MASK(qp->ntb, 1ull << qp->qp_num);
+ ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
taskqueue_drain_all(qp->rxc_tq);
taskqueue_free(qp->rxc_tq);
@@ -536,7 +592,7 @@ ntb_transport_free_queue(struct ntb_transport_qp *qp)
while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
free(entry, M_NTB_T);
- set_bit(qp->qp_num, &qp->transport->qp_bitmap_free);
+ nt->qp_bitmap &= ~(1 << qp->qp_num);
}
/**
@@ -554,25 +610,20 @@ ntb_transport_free_queue(struct ntb_transport_qp *qp)
* RETURNS: pointer to newly created ntb_queue, NULL on error.
*/
struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, device_t dev,
- const struct ntb_queue_handlers *handlers)
+ntb_transport_create_queue(device_t dev, int q,
+ const struct ntb_queue_handlers *handlers, void *data)
{
- struct ntb_transport_ctx *nt = device_get_softc(dev);
- device_t ntb = device_get_parent(dev);
+ struct ntb_transport_child *nc = device_get_ivars(dev);
+ struct ntb_transport_ctx *nt = device_get_softc(device_get_parent(dev));
struct ntb_queue_entry *entry;
struct ntb_transport_qp *qp;
- unsigned int free_queue;
int i;
- free_queue = ffs_bit(&nt->qp_bitmap_free);
- if (free_queue == 0)
+ if (q < 0 || q >= nc->qpcnt)
return (NULL);
- /* decrement free_queue to make it zero based */
- free_queue--;
-
- qp = &nt->qp_vec[free_queue];
- clear_bit(qp->qp_num, &nt->qp_bitmap_free);
+ qp = &nt->qp_vec[nc->qpoff + q];
+ nt->qp_bitmap |= (1 << qp->qp_num);
qp->cb_data = data;
qp->rx_handler = handlers->rx_handler;
qp->tx_handler = handlers->tx_handler;
@@ -593,7 +644,7 @@ ntb_transport_create_queue(void *data, device_t dev,
ntb_list_add(&qp->ntb_tx_free_q_lock, entry, &qp->tx_free_q);
}
- NTB_DB_CLEAR(ntb, 1ull << qp->qp_num);
+ ntb_db_clear(dev, 1ull << qp->qp_num);
return (qp);
}
@@ -640,7 +691,7 @@ ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
struct ntb_queue_entry *entry;
int rc;
- if (qp == NULL || !qp->link_is_up || len == 0) {
+ if (!qp->link_is_up || len == 0) {
CTR0(KTR_NTB, "TX: link not up");
return (EINVAL);
}
@@ -680,7 +731,7 @@ ntb_tx_copy_callback(void *data)
iowrite32(entry->flags | NTBT_DESC_DONE_FLAG, &hdr->flags);
CTR1(KTR_NTB, "TX: hdr %p set DESC_DONE", hdr);
- NTB_PEER_DB_SET(qp->ntb, 1ull << qp->qp_num);
+ ntb_peer_db_set(qp->dev, 1ull << qp->qp_num);
/*
* The entry length can only be zero if the packet is intended to be a
@@ -790,9 +841,9 @@ again:
;
CTR1(KTR_NTB, "RX: process_rxc returned %d", rc);
- if ((NTB_DB_READ(qp->ntb) & (1ull << qp->qp_num)) != 0) {
+ if ((ntb_db_read(qp->dev) & (1ull << qp->qp_num)) != 0) {
/* If db is set, clear it and check queue once more. */
- NTB_DB_CLEAR(qp->ntb, 1ull << qp->qp_num);
+ ntb_db_clear(qp->dev, 1ull << qp->qp_num);
goto again;
}
}
@@ -949,24 +1000,19 @@ ntb_transport_doorbell_callback(void *data, uint32_t vector)
{
struct ntb_transport_ctx *nt = data;
struct ntb_transport_qp *qp;
- struct _qpset db_bits;
uint64_t vec_mask;
unsigned qp_num;
- BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &db_bits);
- BIT_NAND(QP_SETSIZE, &db_bits, &nt->qp_bitmap_free);
-
- vec_mask = NTB_DB_VECTOR_MASK(nt->ntb, vector);
+ vec_mask = ntb_db_vector_mask(nt->dev, vector);
+ vec_mask &= nt->qp_bitmap;
if ((vec_mask & (vec_mask - 1)) != 0)
- vec_mask &= NTB_DB_READ(nt->ntb);
+ vec_mask &= ntb_db_read(nt->dev);
while (vec_mask != 0) {
qp_num = ffsll(vec_mask) - 1;
- if (test_bit(qp_num, &db_bits)) {
- qp = &nt->qp_vec[qp_num];
- if (qp->link_is_up)
- taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);
- }
+ qp = &nt->qp_vec[qp_num];
+ if (qp->link_is_up)
+ taskqueue_enqueue(qp->rxc_tq, &qp->rxc_db_work);
vec_mask &= ~(1ull << qp_num);
}
@@ -978,7 +1024,7 @@ ntb_transport_event_callback(void *data)
{
struct ntb_transport_ctx *nt = data;
- if (NTB_LINK_IS_UP(nt->ntb, NULL, NULL)) {
+ if (ntb_link_is_up(nt->dev, NULL, NULL)) {
ntb_printf(1, "HW link up\n");
callout_reset(&nt->link_work, 0, ntb_transport_link_work, nt);
} else {
@@ -992,7 +1038,7 @@ static void
ntb_transport_link_work(void *arg)
{
struct ntb_transport_ctx *nt = arg;
- device_t ntb = nt->ntb;
+ device_t dev = nt->dev;
struct ntb_transport_qp *qp;
uint64_t val64, size;
uint32_t val;
@@ -1006,36 +1052,34 @@ ntb_transport_link_work(void *arg)
if (max_mw_size != 0 && size > max_mw_size)
size = max_mw_size;
- NTB_PEER_SPAD_WRITE(ntb, NTBT_MW0_SZ_HIGH + (i * 2),
+ ntb_peer_spad_write(dev, NTBT_MW0_SZ_HIGH + (i * 2),
size >> 32);
- NTB_PEER_SPAD_WRITE(ntb, NTBT_MW0_SZ_LOW + (i * 2), size);
+ ntb_peer_spad_write(dev, NTBT_MW0_SZ_LOW + (i * 2), size);
}
-
- NTB_PEER_SPAD_WRITE(ntb, NTBT_NUM_MWS, nt->mw_count);
-
- NTB_PEER_SPAD_WRITE(ntb, NTBT_NUM_QPS, nt->qp_count);
-
- NTB_PEER_SPAD_WRITE(ntb, NTBT_VERSION, NTB_TRANSPORT_VERSION);
+ ntb_peer_spad_write(dev, NTBT_NUM_MWS, nt->mw_count);
+ ntb_peer_spad_write(dev, NTBT_NUM_QPS, nt->qp_count);
+ ntb_peer_spad_write(dev, NTBT_QP_LINKS, 0);
+ ntb_peer_spad_write(dev, NTBT_VERSION, NTB_TRANSPORT_VERSION);
/* Query the remote side for its info */
val = 0;
- NTB_SPAD_READ(ntb, NTBT_VERSION, &val);
+ ntb_spad_read(dev, NTBT_VERSION, &val);
if (val != NTB_TRANSPORT_VERSION)
goto out;
- NTB_SPAD_READ(ntb, NTBT_NUM_QPS, &val);
+ ntb_spad_read(dev, NTBT_NUM_QPS, &val);
if (val != nt->qp_count)
goto out;
- NTB_SPAD_READ(ntb, NTBT_NUM_MWS, &val);
+ ntb_spad_read(dev, NTBT_NUM_MWS, &val);
if (val != nt->mw_count)
goto out;
for (i = 0; i < nt->mw_count; i++) {
- NTB_SPAD_READ(ntb, NTBT_MW0_SZ_HIGH + (i * 2), &val);
+ ntb_spad_read(dev, NTBT_MW0_SZ_HIGH + (i * 2), &val);
val64 = (uint64_t)val << 32;
- NTB_SPAD_READ(ntb, NTBT_MW0_SZ_LOW + (i * 2), &val);
+ ntb_spad_read(dev, NTBT_MW0_SZ_LOW + (i * 2), &val);
val64 |= val;
rc = ntb_set_mw(nt, i, val64);
@@ -1061,7 +1105,7 @@ free_mws:
for (i = 0; i < nt->mw_count; i++)
ntb_free_mw(nt, i);
out:
- if (NTB_LINK_IS_UP(ntb, NULL, NULL))
+ if (ntb_link_is_up(dev, NULL, NULL))
callout_reset(&nt->link_work,
NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_transport_link_work, nt);
}
@@ -1116,7 +1160,7 @@ ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, size_t size)
}
/* Notify HW the memory location of the receive buffer */
- rc = NTB_MW_SET_TRANS(nt->ntb, num_mw, mw->dma_addr, mw->xlat_size);
+ rc = ntb_mw_set_trans(nt->dev, num_mw, mw->dma_addr, mw->xlat_size);
if (rc) {
ntb_printf(0, "Unable to set mw%d translation\n", num_mw);
ntb_free_mw(nt, num_mw);
@@ -1134,7 +1178,7 @@ ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
if (mw->virt_addr == NULL)
return;
- NTB_MW_CLEAR_TRANS(nt->ntb, num_mw);
+ ntb_mw_clear_trans(nt->dev, num_mw);
contigfree(mw->virt_addr, mw->xlat_size, M_NTB_T);
mw->xlat_size = 0;
mw->buff_size = 0;
@@ -1194,18 +1238,20 @@ static void
ntb_qp_link_work(void *arg)
{
struct ntb_transport_qp *qp = arg;
- device_t ntb = qp->ntb;
+ device_t dev = qp->dev;
struct ntb_transport_ctx *nt = qp->transport;
- uint32_t val, dummy;
-
- NTB_SPAD_READ(ntb, NTBT_QP_LINKS, &val);
-
- NTB_PEER_SPAD_WRITE(ntb, NTBT_QP_LINKS, val | (1ull << qp->qp_num));
+ int i;
+ uint32_t val;
- /* query remote spad for qp ready bits */
- NTB_PEER_SPAD_READ(ntb, NTBT_QP_LINKS, &dummy);
+ /* Report queues that are up on our side */
+ for (i = 0, val = 0; i < nt->qp_count; i++) {
+ if (nt->qp_vec[i].client_ready)
+ val |= (1 << i);
+ }
+ ntb_peer_spad_write(dev, NTBT_QP_LINKS, val);
/* See if the remote side is up */
+ ntb_spad_read(dev, NTBT_QP_LINKS, &val);
if ((val & (1ull << qp->qp_num)) != 0) {
ntb_printf(2, "qp %d link up\n", qp->qp_num);
qp->link_is_up = true;
@@ -1213,7 +1259,7 @@ ntb_qp_link_work(void *arg)
if (qp->event_handler != NULL)
qp->event_handler(qp->cb_data, NTB_LINK_UP);
- NTB_DB_CLEAR_MASK(ntb, 1ull << qp->qp_num);
+ ntb_db_clear_mask(dev, 1ull << qp->qp_num);
} else if (nt->link_is_up)
callout_reset(&qp->link_work,
NTB_LINK_DOWN_TIMEOUT * hz / 1000, ntb_qp_link_work, qp);
@@ -1224,19 +1270,16 @@ static void
ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
{
struct ntb_transport_qp *qp;
- struct _qpset qp_bitmap_alloc;
- unsigned i;
-
- BIT_COPY(QP_SETSIZE, &nt->qp_bitmap, &qp_bitmap_alloc);
- BIT_NAND(QP_SETSIZE, &qp_bitmap_alloc, &nt->qp_bitmap_free);
+ int i;
/* Pass along the info to any clients */
- for (i = 0; i < nt->qp_count; i++)
- if (test_bit(i, &qp_bitmap_alloc)) {
+ for (i = 0; i < nt->qp_count; i++) {
+ if ((nt->qp_bitmap & (1 << i)) != 0) {
qp = &nt->qp_vec[i];
ntb_qp_link_cleanup(qp);
callout_drain(&qp->link_work);
}
+ }
if (!nt->link_is_up)
callout_drain(&nt->link_work);
@@ -1246,8 +1289,7 @@ ntb_transport_link_cleanup(struct ntb_transport_ctx *nt)
* goes down, blast them now to give them a sane value the next
* time they are accessed
*/
- for (i = 0; i < NTBT_MAX_SPAD; i++)
- NTB_SPAD_WRITE(nt->ntb, i, 0);
+ ntb_spad_clear(nt->dev);
}
static void
@@ -1269,7 +1311,7 @@ ntb_qp_link_down_reset(struct ntb_transport_qp *qp)
{
qp->link_is_up = false;
- NTB_DB_SET_MASK(qp->ntb, 1ull << qp->qp_num);
+ ntb_db_set_mask(qp->dev, 1ull << qp->qp_num);
qp->tx_index = qp->rx_index = 0;
qp->tx_bytes = qp->rx_bytes = 0;
@@ -1305,17 +1347,16 @@ ntb_qp_link_cleanup(struct ntb_transport_qp *qp)
void
ntb_transport_link_down(struct ntb_transport_qp *qp)
{
+ struct ntb_transport_ctx *nt = qp->transport;
+ int i;
uint32_t val;
- if (qp == NULL)
- return;
-
qp->client_ready = false;
-
- NTB_SPAD_READ(qp->ntb, NTBT_QP_LINKS, &val);
-
- NTB_PEER_SPAD_WRITE(qp->ntb, NTBT_QP_LINKS,
- val & ~(1 << qp->qp_num));
+ for (i = 0, val = 0; i < nt->qp_count; i++) {
+ if (nt->qp_vec[i].client_ready)
+ val |= (1 << i);
+ }
+ ntb_peer_spad_write(qp->dev, NTBT_QP_LINKS, val);
if (qp->link_is_up)
ntb_send_link_down(qp);
@@ -1334,8 +1375,6 @@ ntb_transport_link_down(struct ntb_transport_qp *qp)
bool
ntb_transport_link_query(struct ntb_transport_qp *qp)
{
- if (qp == NULL)
- return (false);
return (qp->link_is_up);
}
@@ -1434,8 +1473,6 @@ out:
*/
unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp)
{
- if (qp == NULL)
- return 0;
return (qp->qp_num);
}
@@ -1452,9 +1489,6 @@ unsigned int
ntb_transport_max_size(struct ntb_transport_qp *qp)
{
- if (qp == NULL)
- return (0);
-
return (qp->tx_max_frame - sizeof(struct ntb_payload_header));
}
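With the bitmap allocator gone, a client no longer grabs the next free queue: it asks its ivars how many queues its slot owns and creates them by index with the new argument order. A hedged sketch of client attach code (the softc layout and handler table are hypothetical):

static int
example_client_attach(device_t dev)
{
	struct example_softc *sc = device_get_softc(dev);
	int i, qpcnt;

	qpcnt = ntb_transport_queue_count(dev);
	for (i = 0; i < qpcnt; i++) {
		sc->qp[i] = ntb_transport_create_queue(dev, i,
		    &example_handlers, sc);
		if (sc->qp[i] == NULL)
			return (ENXIO);
	}
	return (0);
}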
diff --git a/sys/dev/ntb/ntb_transport.h b/sys/dev/ntb/ntb_transport.h
index deb85688fe01..63cdbce96cb2 100644
--- a/sys/dev/ntb/ntb_transport.h
+++ b/sys/dev/ntb/ntb_transport.h
@@ -43,12 +43,13 @@ struct ntb_queue_handlers {
void (*event_handler)(void *data, enum ntb_link_event status);
};
-unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
-unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
+int ntb_transport_queue_count(device_t dev);
struct ntb_transport_qp *
-ntb_transport_create_queue(void *data, device_t dev,
- const struct ntb_queue_handlers *handlers);
+ntb_transport_create_queue(device_t dev, int q,
+ const struct ntb_queue_handlers *handlers, void *data);
void ntb_transport_free_queue(struct ntb_transport_qp *qp);
+unsigned char ntb_transport_qp_num(struct ntb_transport_qp *qp);
+unsigned int ntb_transport_max_size(struct ntb_transport_qp *qp);
int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
unsigned int len);
int ntb_transport_tx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
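The per-unit queue layout comes from the hint.<driver>.<unit>.config tunable parsed in ntb_transport_attach() above: a comma-separated list of children, each an optional driver name, a colon, and a queue count; an omitted count takes the remaining default share. A sketch for /boot/device.hints, where the if_ntb child name is an assumption:

hint.ntb_transport.0.config="if_ntb:1,if_ntb:1"

This would add two network children with one queue each, provided the hardware exposes at least two doorbells.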
diff --git a/sys/dev/nvme/nvme_sim.c b/sys/dev/nvme/nvme_sim.c
index 9c46e168ce52..4f545403fb4b 100644
--- a/sys/dev/nvme/nvme_sim.c
+++ b/sys/dev/nvme/nvme_sim.c
@@ -133,8 +133,6 @@ nvme_sim_action(struct cam_sim *sim, union ccb *ccb)
ns = sim2ns(sim);
ctrlr = sim2ctrlr(sim);
- printf("Sim action: ctrlr %p ns %p\n", ctrlr, ns);
-
mtx_assert(&ctrlr->lock, MA_OWNED);
switch (ccb->ccb_h.func_code) {
@@ -310,8 +308,6 @@ nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
sc->s_ns = ns;
- printf("Our SIM's softc %p ctrlr %p ns %p\n", sc, ctrlr, ns);
-
/*
* XXX this is creating one bus per ns, but it should be one
* XXX target per controller, and one LUN per namespace.
@@ -349,7 +345,6 @@ nvme_sim_new_ns(struct nvme_namespace *ns, void *sc_arg)
sc->s_path->device->nvme_cdata = nvme_ctrlr_get_data(ns->ctrlr);
/* Scan bus */
- printf("Initiate rescan of the bus\n");
nvme_sim_rescan_target(ctrlr, sc->s_path);
mtx_unlock(&ctrlr->lock);
diff --git a/sys/dev/nvram2env/nvram2env.c b/sys/dev/nvram2env/nvram2env.c
index a9423484fae8..d8da4dd1212e 100644
--- a/sys/dev/nvram2env/nvram2env.c
+++ b/sys/dev/nvram2env/nvram2env.c
@@ -46,50 +46,7 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
-#include <dev/siba/siba_ids.h>
-#include <dev/siba/sibareg.h>
-#include <dev/siba/sibavar.h>
-
-#define nvram2env_read_1(sc, reg) \
- bus_space_read_1((sc)->sc_bt, (sc)->sc_bh,(reg))
-
-#define nvram2env_read_2(sc, reg) \
- bus_space_read_2((sc)->sc_bt, (sc)->sc_bh,(reg))
-
-#define nvram2env_read_4(sc, reg) \
- bus_space_read_4((sc)->sc_bt, (sc)->sc_bh,(reg))
-
-#define nvram2env_write_1(sc, reg, val) \
- bus_space_write_1((sc)->sc_bt, (sc)->sc_bh, \
- (reg), (val))
-
-#define nvram2env_write_2(sc, reg, val) \
- bus_space_write_2((sc)->sc_bt, (sc)->sc_bh, \
- (reg), (val))
-
-#define nvram2env_write_4(sc, reg, val) \
- bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, \
- (reg), (val))
-
-struct nvram2env_softc {
- bus_space_tag_t bst;
- bus_space_handle_t bsh;
- bus_addr_t addr;
- int need_swap;
- uint32_t sig;
- uint32_t flags;
-#define NVRAM_FLAGS_NOCHECK 0x0001 /* Do not check(CRC or somthing else)*/
-#define NVRAM_FLAGS_GENERIC 0x0002 /* Format Generic, skip 4b and read */
-#define NVRAM_FLAGS_BROADCOM 0x0004 /* Format Broadcom, use struct nvram */
-#define NVRAM_FLAGS_UBOOT 0x0008 /* Format Generic, skip 4b of CRC and read */
- uint32_t maxsize;
- uint32_t crc;
-};
-
-static int nvram2env_attach(device_t);
-static int nvram2env_probe(device_t);
-
-#define NVRAM_MAX_SIZE 0x10000
+#include "nvram2env.h"
static void
nvram2env_identify(driver_t * drv, device_t parent)
@@ -100,34 +57,55 @@ nvram2env_identify(driver_t * drv, device_t parent)
BUS_ADD_CHILD(parent, 0, "nvram2env", i);
}
-static int
+int
nvram2env_probe(device_t dev)
{
uint32_t i, ivar, sig;
struct nvram2env_softc * sc = device_get_softc(dev);
- sc->bst = mips_bus_space_generic;
- if (resource_int_value("nvram", device_get_unit(dev), "sig",
- &sc->sig) != 0 || sc->sig == 0)
- sc->sig = 0x48534c46;
+ /*
+ * Please ensure that your implementation of NVRAM->ENV specifies
+ * a bus tag.
+ */
+ if (sc->bst == NULL)
+ return (ENXIO);
+
+ if (sc->sig == 0)
+ if (resource_int_value("nvram", device_get_unit(dev), "sig",
+ &sc->sig) != 0 || sc->sig == 0)
+ sc->sig = CFE_NVRAM_SIGNATURE;
- if (resource_int_value("nvram", device_get_unit(dev), "maxsize",
- &sc->maxsize) != 0 || sc->maxsize == 0)
- sc->maxsize = NVRAM_MAX_SIZE;
+ if (sc->maxsize == 0)
+ if (resource_int_value("nvram", device_get_unit(dev), "maxsize",
+ &sc->maxsize) != 0 || sc->maxsize == 0)
+ sc->maxsize = NVRAM_MAX_SIZE;
- if (resource_int_value("nvram", device_get_unit(dev), "flags",
- &sc->flags) != 0 || sc->flags == 0)
- sc->flags = NVRAM_FLAGS_GENERIC;
+ if (sc->flags == 0)
+ if (resource_int_value("nvram", device_get_unit(dev), "flags",
+ &sc->flags) != 0 || sc->flags == 0)
+ sc->flags = NVRAM_FLAGS_GENERIC;
for (i = 0; i < 2; i ++)
{
- if (resource_int_value("nvram", device_get_unit(dev),
- (!i)?"base":"fallbackbase", &ivar) != 0 ||
- ivar == 0)
- continue;
+ switch (i) {
+ case 0:
+ break;
+ case 1:
+ case 2:
+ if (resource_int_value("nvram", device_get_unit(dev),
+ (i == 1) ? "base" : "fallbackbase", &ivar) != 0 ||
+ ivar == 0)
+ continue;
+
+ sc->addr = ivar;
+ break;
+ default:
+ break;
+ }
- sc->addr = ivar;
+ if (sc->addr == 0)
+ continue;
if (bootverbose)
device_printf(dev, "base=0x%08x sig=0x%08x "
@@ -172,15 +150,6 @@ unmap_done:
}
-struct nvram {
- u_int32_t sig;
- u_int32_t size;
- u_int32_t unknown1;
- u_int32_t unknown2;
- u_int32_t unknown3;
- char data[];
-};
-
static uint32_t read_4(struct nvram2env_softc * sc, int offset)
{
if (sc->need_swap)
@@ -190,7 +159,7 @@ static uint32_t read_4(struct nvram2env_softc * sc, int offset)
}
-static int
+int
nvram2env_attach(device_t dev)
{
struct nvram2env_softc *sc;
@@ -209,10 +178,11 @@ nvram2env_attach(device_t dev)
sig = read_4(sc, 0);
size = read_4(sc, 4);
-#if 1
+
if (bootverbose)
- device_printf(dev, " size=0x%05x maxsize=0x%05x\n", size, sc->maxsize);
-#endif
+ device_printf(dev, " size=0x%05x maxsize=0x%05x\n", size,
+ sc->maxsize);
+
size = (size > sc->maxsize)?sc->maxsize:size;
@@ -265,12 +235,12 @@ nvram2env_attach(device_t dev)
assign = strchr(pair,'=');
assign[0] = '\0';
value = assign+1;
-#if 1
+
if (bootverbose)
- printf("ENV: %s=%s\n", pair, value);
-#else
- printf("ENV: %s\n", pair);
-#endif
+ printf("ENV[%p]: %s=%s\n",
+ (void*)((char*)pair - (char*)nv),
+ pair, value);
+
kern_setenv(pair, value);
if (strcasecmp(pair, "WAN_MAC_ADDR") == 0) {
@@ -313,12 +283,10 @@ static device_method_t nvram2env_methods[] = {
DEVMETHOD_END
};
-static driver_t nvram2env_driver = {
+driver_t nvram2env_driver = {
"nvram2env",
nvram2env_methods,
sizeof(struct nvram2env_softc),
};
-static devclass_t nvram2env_devclass;
-
-DRIVER_MODULE(nvram2env, nexus, nvram2env_driver, nvram2env_devclass, 0, 0);
+devclass_t nvram2env_devclass;
diff --git a/sys/dev/nvram2env/nvram2env.h b/sys/dev/nvram2env/nvram2env.h
new file mode 100644
index 000000000000..8f81d99921ba
--- /dev/null
+++ b/sys/dev/nvram2env/nvram2env.h
@@ -0,0 +1,88 @@
+/*-
+ * Copyright (c) 2010 Aleksandr Rybalko.
+ * Copyright (c) 2016 Michael Zhilin.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * $FreeBSD$
+ */
+
+
+#ifndef NVRAM2ENV_NVRAM2ENV_H_
+#define NVRAM2ENV_NVRAM2ENV_H_
+
+#define nvram2env_read_1(sc, reg) \
+ bus_space_read_1((sc)->sc_bt, (sc)->sc_bh,(reg))
+
+#define nvram2env_read_2(sc, reg) \
+ bus_space_read_2((sc)->sc_bt, (sc)->sc_bh,(reg))
+
+#define nvram2env_read_4(sc, reg) \
+ bus_space_read_4((sc)->sc_bt, (sc)->sc_bh,(reg))
+
+#define nvram2env_write_1(sc, reg, val) \
+ bus_space_write_1((sc)->sc_bt, (sc)->sc_bh, \
+ (reg), (val))
+
+#define nvram2env_write_2(sc, reg, val) \
+ bus_space_write_2((sc)->sc_bt, (sc)->sc_bh, \
+ (reg), (val))
+
+#define nvram2env_write_4(sc, reg, val) \
+ bus_space_write_4((sc)->sc_bt, (sc)->sc_bh, \
+ (reg), (val))
+
+struct nvram2env_softc {
+ bus_space_tag_t bst;
+ bus_space_handle_t bsh;
+ bus_addr_t addr;
+ int need_swap;
+ uint32_t sig;
+ uint32_t flags;
+#define NVRAM_FLAGS_NOCHECK 0x0001 /* Do not check (CRC or something else) */
+#define NVRAM_FLAGS_GENERIC 0x0002 /* Format Generic, skip 4b and read */
+#define NVRAM_FLAGS_BROADCOM 0x0004 /* Format Broadcom, use struct nvram */
+#define NVRAM_FLAGS_UBOOT 0x0008 /* Format Generic, skip 4b of CRC and read */
+ uint32_t maxsize;
+ uint32_t crc;
+};
+
+#define NVRAM_MAX_SIZE 0x10000
+#define CFE_NVRAM_SIGNATURE 0x48534c46
+
+struct nvram {
+ u_int32_t sig;
+ u_int32_t size;
+ u_int32_t unknown1;
+ u_int32_t unknown2;
+ u_int32_t unknown3;
+ char data[];
+};
+
+int nvram2env_attach(device_t);
+int nvram2env_probe(device_t);
+
+extern devclass_t nvram2env_devclass;
+extern driver_t nvram2env_driver;
+
+#endif /* NVRAM2ENV_NVRAM2ENV_H_ */
diff --git a/sys/dev/nvram2env/nvram2env_mips.c b/sys/dev/nvram2env/nvram2env_mips.c
new file mode 100644
index 000000000000..2a669a392f58
--- /dev/null
+++ b/sys/dev/nvram2env/nvram2env_mips.c
@@ -0,0 +1,69 @@
+/*-
+ * Copyright (c) 2010 Aleksandr Rybalko.
+ * Copyright (c) 2016 Michael Zhilin.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Implementation of pseudo driver for MIPS to copy the NVRAM settings
+ * from various sources into the kernel environment.
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include <sys/param.h>
+#include <sys/bus.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <machine/bus.h>
+
+#include "nvram2env.h"
+
+static int
+nvram2env_mips_probe(device_t dev)
+{
+ struct nvram2env_softc *sc;
+
+ sc = device_get_softc(dev);
+ sc->bst = mips_bus_space_generic;
+
+ return (nvram2env_probe(dev));
+}
+
+static device_method_t nvram2env_mips_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, nvram2env_mips_probe),
+
+ DEVMETHOD_END
+};
+
+DEFINE_CLASS_1(nvram2env, nvram2env_mips_driver, nvram2env_mips_methods,
+ sizeof(struct nvram2env_softc), nvram2env_driver);
+DRIVER_MODULE(nvram2env_mips, nexus, nvram2env_mips_driver, nvram2env_devclass,
+ NULL, NULL);
+
+MODULE_VERSION(nvram2env_mips, 1);
+MODULE_DEPEND(nvram2env_mips, nvram2env, 1, 1, 1);
diff --git a/sys/dev/ofw/ofw_fdt.c b/sys/dev/ofw/ofw_fdt.c
index fc41d5bc6a3c..0f7cf07919b3 100644
--- a/sys/dev/ofw/ofw_fdt.c
+++ b/sys/dev/ofw/ofw_fdt.c
@@ -96,6 +96,27 @@ OFW_DEF(ofw_fdt);
static void *fdtp = NULL;
static int
+sysctl_handle_dtb(SYSCTL_HANDLER_ARGS)
+{
+
+ return (sysctl_handle_opaque(oidp, fdtp, fdt_totalsize(fdtp), req));
+}
+
+static void
+sysctl_register_fdt_oid(void *arg)
+{
+
+ /* If there is no FDT registered, skip adding the sysctl */
+ if (fdtp == NULL)
+ return;
+
+ SYSCTL_ADD_PROC(NULL, SYSCTL_STATIC_CHILDREN(_hw_fdt), OID_AUTO, "dtb",
+ CTLTYPE_OPAQUE | CTLFLAG_RD, NULL, 0, sysctl_handle_dtb, "",
+ "Device Tree Blob");
+}
+SYSINIT(dtb_oid, SI_SUB_KMEM, SI_ORDER_ANY, sysctl_register_fdt_oid, 0);
+
+static int
ofw_fdt_init(ofw_t ofw, void *data)
{
int err;
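The new hw.fdt.dtb OID exports the raw blob, so the running device tree can be recovered from userland. A minimal sketch of a reader, assuming only the sysctl added above (error reporting elided):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *buf;
	size_t len = 0;

	/* The first call sizes the blob, the second fetches it. */
	if (sysctlbyname("hw.fdt.dtb", NULL, &len, NULL, 0) != 0)
		return (1);
	buf = malloc(len);
	if (buf == NULL ||
	    sysctlbyname("hw.fdt.dtb", buf, &len, NULL, 0) != 0)
		return (1);
	fwrite(buf, 1, len, stdout);	/* feed to dtc -I dtb -O dts */
	free(buf);
	return (0);
}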
diff --git a/sys/dev/ofw/ofwpci.c b/sys/dev/ofw/ofwpci.c
index 3423050e5559..1daafa644cf6 100644
--- a/sys/dev/ofw/ofwpci.c
+++ b/sys/dev/ofw/ofwpci.c
@@ -414,14 +414,14 @@ ofw_pci_alloc_resource(device_t bus, device_t child, int type, int *rid,
#if defined(NEW_PCIB) && defined(PCI_RES_BUS)
if (type == PCI_RES_BUS) {
return (pci_domain_alloc_bus(sc->sc_pci_domain, child, rid,
- start, end, count, flags));
+ start, end, count, flags | needactivate));
}
#endif
rm = ofw_pci_get_rman(sc, type, flags);
if (rm == NULL) {
return (bus_generic_alloc_resource(bus, child, type, rid,
- start, end, count, flags));
+ start, end, count, flags | needactivate));
}
rv = rman_reserve_resource(rm, start, end, count, flags, child);
diff --git a/sys/dev/ofw/openfirmio.c b/sys/dev/ofw/openfirmio.c
index 5803ec1d6c4a..0079310dc83c 100644
--- a/sys/dev/ofw/openfirmio.c
+++ b/sys/dev/ofw/openfirmio.c
@@ -100,8 +100,6 @@ openfirm_getstr(int len, const char *user, char **cpp)
return (ENAMETOOLONG);
*cpp = cp = malloc(len + 1, M_TEMP, M_WAITOK);
- if (cp == NULL)
- return (ENOMEM);
error = copyin(user, cp, len);
cp[len] = '\0';
return (error);
@@ -173,10 +171,6 @@ openfirm_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
if (len <= 0)
break;
value = malloc(len, M_TEMP, M_WAITOK);
- if (value == NULL) {
- error = ENOMEM;
- break;
- }
len = OF_getprop(node, name, (void *)value, len);
error = copyout(value, of->of_buf, len);
break;
@@ -199,10 +193,6 @@ openfirm_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
if (error)
break;
value = malloc(of->of_buflen, M_TEMP, M_WAITOK);
- if (value == NULL) {
- error = ENOMEM;
- break;
- }
error = copyin(of->of_buf, value, of->of_buflen);
if (error)
break;
diff --git a/sys/dev/ofw/openpromio.c b/sys/dev/ofw/openpromio.c
index 8ba6d3acef3f..e2a345b1e9db 100644
--- a/sys/dev/ofw/openpromio.c
+++ b/sys/dev/ofw/openpromio.c
@@ -151,18 +151,10 @@ openprom_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flags,
break;
}
prop = malloc(len, M_TEMP, M_WAITOK | M_ZERO);
- if (prop == NULL) {
- error = ENOMEM;
- break;
- }
error = copyinstr(&oprom->oprom_array, prop, len, &done);
if (error != 0)
break;
buf = malloc(OPROMMAXPARAM, M_TEMP, M_WAITOK | M_ZERO);
- if (buf == NULL) {
- error = ENOMEM;
- break;
- }
node = openprom_node;
switch (cmd) {
case OPROMGETPROP:
diff --git a/sys/dev/pci/pci_if.m b/sys/dev/pci/pci_if.m
index 5f8415fddda7..11fa5d75d920 100644
--- a/sys/dev/pci/pci_if.m
+++ b/sys/dev/pci/pci_if.m
@@ -235,6 +235,7 @@ METHOD int iov_attach {
device_t child;
struct nvlist *pf_schema;
struct nvlist *vf_schema;
+ const char *name;
};
METHOD int iov_detach {
diff --git a/sys/dev/pci/pci_iov.c b/sys/dev/pci/pci_iov.c
index bba99f620a4e..db8a035f21aa 100644
--- a/sys/dev/pci/pci_iov.c
+++ b/sys/dev/pci/pci_iov.c
@@ -98,8 +98,22 @@ static nvlist_t *pci_iov_get_pf_subsystem_schema(void);
static nvlist_t *pci_iov_get_vf_subsystem_schema(void);
int
+pci_iov_attach_name(device_t dev, struct nvlist *pf_schema,
+ struct nvlist *vf_schema, const char *fmt, ...)
+{
+ char buf[NAME_MAX + 1];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ return (PCI_IOV_ATTACH(device_get_parent(dev), dev, pf_schema,
+ vf_schema, buf));
+}
+
+int
pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema,
- nvlist_t *vf_schema)
+ nvlist_t *vf_schema, const char *name)
{
device_t pcib;
struct pci_devinfo *dinfo;
@@ -149,7 +163,7 @@ pci_iov_attach_method(device_t bus, device_t dev, nvlist_t *pf_schema,
iov->iov_schema = schema;
iov->iov_cdev = make_dev(&iov_cdevsw, device_get_unit(dev),
- UID_ROOT, GID_WHEEL, 0600, "iov/%s", device_get_nameunit(dev));
+ UID_ROOT, GID_WHEEL, 0600, "iov/%s", name);
if (iov->iov_cdev == NULL) {
error = ENOMEM;
diff --git a/sys/dev/pci/pci_iov.h b/sys/dev/pci/pci_iov.h
index fd2f8fba8876..297ae07f4c57 100644
--- a/sys/dev/pci/pci_iov.h
+++ b/sys/dev/pci/pci_iov.h
@@ -33,11 +33,14 @@
struct nvlist;
+int pci_iov_attach_name(device_t dev, struct nvlist *pf_schema,
+ struct nvlist *vf_schema, const char *fmt, ...) __printflike(4, 5);
+
static __inline int
pci_iov_attach(device_t dev, struct nvlist *pf_schema, struct nvlist *vf_schema)
{
return (PCI_IOV_ATTACH(device_get_parent(dev), dev, pf_schema,
- vf_schema));
+ vf_schema, device_get_nameunit(dev)));
}
static __inline int
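The extra argument lets a PF driver publish its /dev/iov node under a name other than its own nameunit, while the pci_iov_attach() inline keeps the old behaviour. A sketch of a driver using the formatted variant (the schemas and name format are hypothetical):

error = pci_iov_attach_name(dev, pf_schema, vf_schema,
    "mydev%d", device_get_unit(dev));
if (error != 0)
	device_printf(dev, "SR-IOV attach failed: %d\n", error);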
diff --git a/sys/dev/pci/pci_pci.c b/sys/dev/pci/pci_pci.c
index 06e9e1c919ab..6e8d076d2cad 100644
--- a/sys/dev/pci/pci_pci.c
+++ b/sys/dev/pci/pci_pci.c
@@ -128,9 +128,11 @@ static devclass_t pcib_devclass;
DEFINE_CLASS_0(pcib, pcib_driver, pcib_methods, sizeof(struct pcib_softc));
DRIVER_MODULE(pcib, pci, pcib_driver, pcib_devclass, NULL, NULL);
-#ifdef NEW_PCIB
+#if defined(NEW_PCIB) || defined(PCI_HP)
SYSCTL_DECL(_hw_pci);
+#endif
+#ifdef NEW_PCIB
static int pci_clear_pcib;
SYSCTL_INT(_hw_pci, OID_AUTO, clear_pcib, CTLFLAG_RDTUN, &pci_clear_pcib, 0,
"Clear firmware-assigned resources for PCI-PCI bridge I/O windows.");
@@ -907,11 +909,19 @@ pcib_set_mem_decode(struct pcib_softc *sc)
/*
* PCI-express HotPlug support.
*/
+static int pci_enable_pcie_hp = 1;
+SYSCTL_INT(_hw_pci, OID_AUTO, enable_pcie_hp, CTLFLAG_RDTUN,
+ &pci_enable_pcie_hp, 0,
+ "Enable support for native PCI-express HotPlug.");
+
static void
pcib_probe_hotplug(struct pcib_softc *sc)
{
device_t dev;
+ if (!pci_enable_pcie_hp)
+ return;
+
dev = sc->dev;
if (pci_find_cap(dev, PCIY_EXPRESS, NULL) != 0)
return;
@@ -922,6 +932,13 @@ pcib_probe_hotplug(struct pcib_softc *sc)
sc->pcie_link_cap = pcie_read_config(dev, PCIER_LINK_CAP, 4);
sc->pcie_slot_cap = pcie_read_config(dev, PCIER_SLOT_CAP, 4);
+ /*
+ * XXX: Handling of slots with a power controller needs to be
+ * reexamined. Ignore hotplug on such slots for now.
+ */
+ if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_PCP)
+ return;
+
if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_HPC)
sc->flags |= PCIB_HOTPLUG;
}
@@ -1040,7 +1057,7 @@ static void
pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
bool schedule_task)
{
- bool card_inserted;
+ bool card_inserted, ei_engaged;
/* Clear DETACHING if Present Detect has cleared. */
if ((sc->pcie_slot_sta & (PCIEM_SLOT_STA_PDC | PCIEM_SLOT_STA_PDS)) ==
@@ -1077,8 +1094,8 @@ pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
*/
if (sc->pcie_slot_cap & PCIEM_SLOT_CAP_EIP) {
mask |= PCIEM_SLOT_CTL_EIC;
- if (card_inserted !=
- !(sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS))
+ ei_engaged = (sc->pcie_slot_sta & PCIEM_SLOT_STA_EIS) != 0;
+ if (card_inserted != ei_engaged)
val |= PCIEM_SLOT_CTL_EIC;
}
@@ -1105,7 +1122,7 @@ pcib_pcie_hotplug_update(struct pcib_softc *sc, uint16_t val, uint16_t mask,
pcib_pcie_hotplug_command(sc, val, mask);
/*
- * During attach the child "pci" device is added sychronously;
+ * During attach the child "pci" device is added synchronously;
* otherwise, the task is scheduled to manage the child
* device.
*/
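Since the new knob is CTLFLAG_RDTUN, it can only be set from the loader; disabling native HotPlug on a machine with problematic slot firmware would look like this in /boot/loader.conf:

hw.pci.enable_pcie_hp="0"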
diff --git a/sys/dev/pci/pci_private.h b/sys/dev/pci/pci_private.h
index a9958ca627db..b0f1481810c7 100644
--- a/sys/dev/pci/pci_private.h
+++ b/sys/dev/pci/pci_private.h
@@ -158,7 +158,8 @@ struct resource *pci_alloc_multi_resource(device_t dev, device_t child,
rman_res_t count, u_long num, u_int flags);
int pci_iov_attach_method(device_t bus, device_t dev,
- struct nvlist *pf_schema, struct nvlist *vf_schema);
+ struct nvlist *pf_schema, struct nvlist *vf_schema,
+ const char *name);
int pci_iov_detach_method(device_t bus, device_t dev);
device_t pci_create_iov_child_method(device_t bus, device_t pf,
diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c
index f5a921a75ff1..dba76d130aa5 100644
--- a/sys/dev/pci/pci_user.c
+++ b/sys/dev/pci/pci_user.c
@@ -708,10 +708,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
* Go through the list of devices and copy out the devices
* that match the user's criteria.
*/
- for (cio->num_matches = 0, error = 0, i = 0,
+ for (cio->num_matches = 0, i = 0,
dinfo = STAILQ_FIRST(devlist_head);
- (dinfo != NULL) && (cio->num_matches < ionum) &&
- (error == 0) && (i < pci_numdevs);
+ dinfo != NULL;
dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {
if (i < cio->offset)
@@ -833,11 +832,12 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t
} else
#endif /* PRE7_COMPAT */
confdata = &dinfo->conf;
- /* Only if we can copy it out do we count it. */
- if (!(error = copyout(confdata,
+ error = copyout(confdata,
(caddr_t)cio->matches +
- confsz * cio->num_matches, confsz)))
- cio->num_matches++;
+ confsz * cio->num_matches, confsz);
+ if (error)
+ break;
+ cio->num_matches++;
}
}
diff --git a/sys/dev/pci/pcivar.h b/sys/dev/pci/pcivar.h
index 3180b4ef5de7..2e50af66c1c2 100644
--- a/sys/dev/pci/pcivar.h
+++ b/sys/dev/pci/pcivar.h
@@ -174,7 +174,7 @@ struct pcicfg_ea {
/* config header information common to all header types */
typedef struct pcicfg {
- struct device *dev; /* device which owns this */
+ device_t dev; /* device which owns this */
STAILQ_HEAD(, pci_map) maps; /* BARs */
diff --git a/sys/dev/sound/sbus/cs4231.c b/sys/dev/sound/sbus/cs4231.c
index 83f70f3a4813..7b80afcf0f9b 100644
--- a/sys/dev/sound/sbus/cs4231.c
+++ b/sys/dev/sound/sbus/cs4231.c
@@ -113,7 +113,7 @@ struct cs4231_channel {
#define CS4231_RES_MEM_MAX 4
#define CS4231_RES_IRQ_MAX 2
struct cs4231_softc {
- struct device *sc_dev;
+ device_t sc_dev;
int sc_rid[CS4231_RES_MEM_MAX];
struct resource *sc_res[CS4231_RES_MEM_MAX];
bus_space_handle_t sc_regh[CS4231_RES_MEM_MAX];
diff --git a/sys/dev/syscons/syscons.c b/sys/dev/syscons/syscons.c
index 063083e0b41d..58d6a0c43efc 100644
--- a/sys/dev/syscons/syscons.c
+++ b/sys/dev/syscons/syscons.c
@@ -172,8 +172,6 @@ SYSCTL_INT(_machdep, OID_AUTO, enable_panic_key, CTLFLAG_RW, &enable_panic_key,
#define VTY_WCHAN(sc, vty) (&SC_DEV(sc, vty))
-static int debugger;
-
/* prototypes */
static int sc_allocate_keyboard(sc_softc_t *sc, int unit);
static int scvidprobe(int unit, int flags, int cons);
@@ -1647,56 +1645,67 @@ sc_cnterm(struct consdev *cp)
sc_console = NULL;
}
+struct sc_cnstate; /* not used yet */
+static void sccnclose(sc_softc_t *sc, struct sc_cnstate *sp);
+static void sccnopen(sc_softc_t *sc, struct sc_cnstate *sp, int flags);
+
static void
-sc_cngrab(struct consdev *cp)
+sccnopen(sc_softc_t *sc, struct sc_cnstate *sp, int flags)
{
- scr_stat *scp;
+ int kbd_mode;
if (!cold &&
- sc_console->sc->cur_scp->index != sc_console->index &&
- sc_console->sc->cur_scp->smode.mode == VT_AUTO &&
+ sc->cur_scp->index != sc_console->index &&
+ sc->cur_scp->smode.mode == VT_AUTO &&
sc_console->smode.mode == VT_AUTO)
- sc_switch_scr(sc_console->sc, sc_console->index);
+ sc_switch_scr(sc, sc_console->index);
- scp = sc_console->sc->cur_scp;
-
- if (scp->sc->kbd == NULL)
- return;
-
- if (scp->grabbed++ > 0)
+ if (sc->kbd == NULL)
return;
/*
* Make sure the keyboard is accessible even when the kbd device
* driver is disabled.
*/
- kbdd_enable(scp->sc->kbd);
+ kbdd_enable(sc->kbd);
- /* we shall always use the keyboard in the XLATE mode here */
- scp->kbd_prev_mode = scp->kbd_mode;
- scp->kbd_mode = K_XLATE;
- (void)kbdd_ioctl(scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
-
- kbdd_poll(scp->sc->kbd, TRUE);
+ /* Switch the keyboard to console mode (K_XLATE, polled) on all scp's. */
+ kbd_mode = K_XLATE;
+ (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&kbd_mode);
+ kbdd_poll(sc->kbd, TRUE);
}
static void
-sc_cnungrab(struct consdev *cp)
+sccnclose(sc_softc_t *sc, struct sc_cnstate *sp)
{
- scr_stat *scp;
-
- scp = sc_console->sc->cur_scp; /* XXX */
- if (scp->sc->kbd == NULL)
+ if (sc->kbd == NULL)
return;
- if (--scp->grabbed > 0)
- return;
+ /* Restore keyboard mode (for the current, possibly-changed scp). */
+ kbdd_poll(sc->kbd, FALSE);
+ (void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&sc->cur_scp->kbd_mode);
- kbdd_poll(scp->sc->kbd, FALSE);
+ kbdd_disable(sc->kbd);
+}
+
+static void
+sc_cngrab(struct consdev *cp)
+{
+ sc_softc_t *sc;
- scp->kbd_mode = scp->kbd_prev_mode;
- (void)kbdd_ioctl(scp->sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
- kbdd_disable(scp->sc->kbd);
+ sc = sc_console->sc;
+ if (sc->grab_level++ == 0)
+ sccnopen(sc, NULL, 0);
+}
+
+static void
+sc_cnungrab(struct consdev *cp)
+{
+ sc_softc_t *sc;
+
+ sc = sc_console->sc;
+ if (--sc->grab_level == 0)
+ sccnclose(sc, NULL);
}
static void
@@ -1815,7 +1824,7 @@ sccnupdate(scr_stat *scp)
if (suspend_in_progress || scp->sc->font_loading_in_progress)
return;
- if (debugger > 0 || panicstr || shutdown_in_progress) {
+ if (kdb_active || panicstr || shutdown_in_progress) {
sc_touch_scrn_saver();
} else if (scp != scp->sc->cur_scp) {
return;
@@ -1884,7 +1893,7 @@ scrn_timer(void *arg)
#endif /* PC98 */
/* should we stop the screen saver? */
- if (debugger > 0 || panicstr || shutdown_in_progress)
+ if (kdb_active || panicstr || shutdown_in_progress)
sc_touch_scrn_saver();
if (run_scrn_saver) {
if (time_uptime > sc->scrn_time_stamp + scrn_blank_time)
@@ -2279,7 +2288,7 @@ stop_scrn_saver(sc_softc_t *sc, void (*saver)(sc_softc_t *, int))
mark_all(sc->cur_scp);
if (sc->delayed_next_scr)
sc_switch_scr(sc, sc->delayed_next_scr - 1);
- if (debugger == 0)
+ if (!kdb_active)
wakeup(&scrn_blanked);
}
@@ -2474,7 +2483,7 @@ sc_switch_scr(sc_softc_t *sc, u_int next_scr)
DPRINTF(5, ("error 2, requested vty isn't open!\n"));
return EINVAL;
}
- if ((debugger > 0) && (SC_STAT(tp)->smode.mode == VT_PROCESS)) {
+ if (kdb_active && SC_STAT(tp)->smode.mode == VT_PROCESS) {
splx(s);
DPRINTF(5, ("error 3, requested vty is in the VT_PROCESS mode\n"));
return EINVAL;
@@ -2495,7 +2504,7 @@ sc_switch_scr(sc_softc_t *sc, u_int next_scr)
* is supposed to be locked by splhigh(), but the debugger may
* be invoked at splhigh().
*/
- if (debugger == 0)
+ if (!kdb_active)
wakeup(VTY_WCHAN(sc,next_scr));
splx(s);
DPRINTF(5, ("switch done (new == old)\n"));
@@ -2518,7 +2527,7 @@ sc_switch_scr(sc_softc_t *sc, u_int next_scr)
s = spltty();
/* wake up processes waiting for this vty */
- if (debugger == 0)
+ if (!kdb_active)
wakeup(VTY_WCHAN(sc,next_scr));
/* wait for the controlling process to acknowledge, if necessary */
@@ -2669,7 +2678,7 @@ exchange_scr(sc_softc_t *sc)
sc_set_border(scp, scp->border);
/* set up the keyboard for the new screen */
- if (sc->old_scp->kbd_mode != scp->kbd_mode)
+ if (sc->grab_level == 0 && sc->old_scp->kbd_mode != scp->kbd_mode)
(void)kbdd_ioctl(sc->kbd, KDSKBMODE, (caddr_t)&scp->kbd_mode);
update_kbd_state(scp, scp->status, LOCK_MASK);
@@ -2688,13 +2697,13 @@ sc_puts(scr_stat *scp, u_char *buf, int len, int kernel)
#endif
if (scp->tsw) {
- if (!kdb_active && !mtx_owned(&scp->scr_lock)) {
+ if (!kdb_active && !mtx_owned(&scp->sc->scr_lock)) {
need_unlock = 1;
- mtx_lock_spin(&scp->scr_lock);
+ mtx_lock_spin(&scp->sc->scr_lock);
}
(*scp->tsw->te_puts)(scp, buf, len, kernel);
if (need_unlock)
- mtx_unlock_spin(&scp->scr_lock);
+ mtx_unlock_spin(&scp->sc->scr_lock);
}
if (scp->sc->delayed_next_scr)
@@ -2859,8 +2868,10 @@ scinit(int unit, int flags)
* disappeared...
*/
sc = sc_get_softc(unit, flags & SC_KERNEL_CONSOLE);
- if ((sc->flags & SC_INIT_DONE) == 0)
+ if ((sc->flags & SC_INIT_DONE) == 0) {
+ mtx_init(&sc->scr_lock, "scrlock", NULL, MTX_SPIN);
SC_VIDEO_LOCKINIT(sc);
+ }
adp = NULL;
if (sc->adapter >= 0) {
@@ -3077,7 +3088,8 @@ scterm(int unit, int flags)
(*scp->tsw->te_term)(scp, &scp->ts);
if (scp->ts != NULL)
free(scp->ts, M_DEVBUF);
- mtx_destroy(&scp->scr_lock);
+ mtx_destroy(&sc->scr_lock);
+ mtx_destroy(&sc->video_mtx);
/* clear the structure */
if (!(flags & SC_KERNEL_CONSOLE)) {
@@ -3302,8 +3314,6 @@ init_scp(sc_softc_t *sc, int vty, scr_stat *scp)
scp->history = NULL;
scp->history_pos = 0;
scp->history_size = 0;
-
- mtx_init(&scp->scr_lock, "scrlock", NULL, MTX_SPIN);
}
int
@@ -3413,7 +3423,7 @@ next_code:
if (!(flags & SCGETC_CN))
random_harvest_queue(&c, sizeof(c), 1, RANDOM_KEYBOARD);
- if (scp->kbd_mode != K_XLATE)
+ if (sc->grab_level == 0 && scp->kbd_mode != K_XLATE)
return KEYCHAR(c);
/* if scroll-lock pressed allow history browsing */
@@ -3506,8 +3516,9 @@ next_code:
scp->status |= CURSOR_ENABLED;
sc_draw_cursor_image(scp);
}
+ /* Only safe in Giant-locked context. */
tp = SC_DEV(sc, scp->index);
- if (!kdb_active && tty_opened_ns(tp))
+ if (!(flags & SCGETC_CN) && tty_opened_ns(tp))
sctty_outwakeup(tp);
#endif
}
@@ -3558,21 +3569,21 @@ next_code:
case RBT:
#ifndef SC_DISABLE_REBOOT
- if (enable_reboot)
+ if (enable_reboot && !(flags & SCGETC_CN))
shutdown_nice(0);
#endif
break;
case HALT:
#ifndef SC_DISABLE_REBOOT
- if (enable_reboot)
+ if (enable_reboot && !(flags & SCGETC_CN))
shutdown_nice(RB_HALT);
#endif
break;
case PDWN:
#ifndef SC_DISABLE_REBOOT
- if (enable_reboot)
+ if (enable_reboot && !(flags & SCGETC_CN))
shutdown_nice(RB_HALT|RB_POWEROFF);
#endif
break;
@@ -3843,7 +3854,7 @@ sc_respond(scr_stat *scp, const u_char *p, int count, int wakeup)
void
sc_bell(scr_stat *scp, int pitch, int duration)
{
- if (cold || shutdown_in_progress || !enable_bell)
+ if (cold || kdb_active || shutdown_in_progress || !enable_bell)
return;
if (scp != scp->sc->cur_scp && (scp->sc->flags & SC_QUIET_BELL))
diff --git a/sys/dev/syscons/syscons.h b/sys/dev/syscons/syscons.h
index 06e104595c2e..e0aa0beb4328 100644
--- a/sys/dev/syscons/syscons.h
+++ b/sys/dev/syscons/syscons.h
@@ -230,6 +230,8 @@ typedef struct sc_softc {
char switch_in_progress;
char write_in_progress;
char blink_in_progress;
+ int grab_level;
+ struct mtx scr_lock; /* mutex for sc_puts() */
struct mtx video_mtx;
long scrn_time_stamp;
@@ -303,9 +305,7 @@ typedef struct scr_stat {
void *ts;
int status; /* status (bitfield) */
- int grabbed;
int kbd_mode; /* keyboard I/O mode */
- int kbd_prev_mode; /* keyboard I/O mode */
int cursor_pos; /* cursor buffer position */
int cursor_oldpos; /* cursor old buffer position */
@@ -344,7 +344,6 @@ typedef struct scr_stat {
int splash_save_mode; /* saved mode for splash screen */
int splash_save_status; /* saved status for splash screen */
- struct mtx scr_lock; /* mutex for sc_puts() */
#ifdef _SCR_MD_STAT_DECLARED_
scr_md_stat_t md; /* machine dependent vars */
#endif
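
The syscons rework above replaces the per-screen grabbed counter and saved keyboard mode with a single per-softc grab_level, so nested console grabs (for example, a panic while already in the debugger) open the keyboard in polled K_XLATE mode once and restore the current screen's mode only on the final ungrab. The counting idiom in isolation, using the sccnopen()/sccnclose() helpers the patch introduces:

    /* Sketch of the refcounted grab: first grab opens, last ungrab closes. */
    static void
    grab(sc_softc_t *sc)
    {
            if (sc->grab_level++ == 0)
                    sccnopen(sc, NULL, 0);
    }

    static void
    ungrab(sc_softc_t *sc)
    {
            if (--sc->grab_level == 0)
                    sccnclose(sc, NULL);
    }
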
diff --git a/sys/dev/tpm/tpm.c b/sys/dev/tpm/tpm.c
index 3ac52d6e7691..3779c09f8193 100644
--- a/sys/dev/tpm/tpm.c
+++ b/sys/dev/tpm/tpm.c
@@ -175,8 +175,8 @@ struct cfdriver tpm_cd = {
NULL, "tpm", DV_DULL
};
-int tpm_match(struct device *, void *, void *);
-void tpm_attach(struct device *, struct device *, void *);
+int tpm_match(device_t, void *, void *);
+void tpm_attach(device_t, device_t, void *);
struct cfattach tpm_ca = {
sizeof(struct tpm_softc), tpm_match, tpm_attach
@@ -337,7 +337,7 @@ tpm_detach(device_t dev)
* OpenBSD specific code for probing and attaching TPM to device tree.
*/
int
-tpm_match(struct device *parent, void *match, void *aux)
+tpm_match(device_t parent, void *match, void *aux)
{
struct isa_attach_args *ia = aux;
struct cfdata *cf = match;
@@ -370,7 +370,7 @@ tpm_match(struct device *parent, void *match, void *aux)
}
void
-tpm_attach(struct device *parent, struct device *self, void *aux)
+tpm_attach(device_t parent, device_t self, void *aux)
{
struct tpm_softc *sc = (struct tpm_softc *)self;
struct isa_attach_args *ia = aux;
diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c
index bf9e143c72f5..480f6f954893 100644
--- a/sys/dev/tws/tws.c
+++ b/sys/dev/tws/tws.c
@@ -606,21 +606,9 @@ tws_init(struct tws_softc *sc)
sc->reqs = malloc(sizeof(struct tws_request) * tws_queue_depth, M_TWS,
M_WAITOK | M_ZERO);
- if ( sc->reqs == NULL ) {
- TWS_TRACE_DEBUG(sc, "malloc failed", 0, sc->is64bit);
- return(ENOMEM);
- }
sc->sense_bufs = malloc(sizeof(struct tws_sense) * tws_queue_depth, M_TWS,
M_WAITOK | M_ZERO);
- if ( sc->sense_bufs == NULL ) {
- TWS_TRACE_DEBUG(sc, "sense malloc failed", 0, sc->is64bit);
- return(ENOMEM);
- }
sc->scan_ccb = malloc(sizeof(union ccb), M_TWS, M_WAITOK | M_ZERO);
- if ( sc->scan_ccb == NULL ) {
- TWS_TRACE_DEBUG(sc, "ccb malloc failed", 0, sc->is64bit);
- return(ENOMEM);
- }
if (bus_dmamem_alloc(sc->data_tag, (void **)&sc->ioctl_data_mem,
(BUS_DMA_NOWAIT | BUS_DMA_ZERO), &sc->ioctl_data_map)) {
device_printf(sc->tws_dev, "Cannot allocate ioctl data mem\n");
@@ -668,8 +656,6 @@ tws_init_aen_q(struct tws_softc *sc)
sc->aen_q.overflow=0;
sc->aen_q.q = malloc(sizeof(struct tws_event_packet)*sc->aen_q.depth,
M_TWS, M_WAITOK | M_ZERO);
- if ( ! sc->aen_q.q )
- return(FAILURE);
return(SUCCESS);
}
@@ -682,8 +668,6 @@ tws_init_trace_q(struct tws_softc *sc)
sc->trace_q.overflow=0;
sc->trace_q.q = malloc(sizeof(struct tws_trace_rec)*sc->trace_q.depth,
M_TWS, M_WAITOK | M_ZERO);
- if ( ! sc->trace_q.q )
- return(FAILURE);
return(SUCCESS);
}
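
The checks deleted from tws_init() and friends were dead code: malloc(9) called with M_WAITOK sleeps until the allocation can be satisfied and never returns NULL; only M_NOWAIT callers must check. The contrast, as a sketch:

    void *p, *q;

    p = malloc(sz, M_TWS, M_WAITOK | M_ZERO);   /* cannot return NULL */
    q = malloc(sz, M_TWS, M_NOWAIT | M_ZERO);   /* may return NULL */
    if (q == NULL)
            return (ENOMEM);                    /* check required only here */
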
diff --git a/sys/dev/uart/uart_cpu_fdt.c b/sys/dev/uart/uart_cpu_fdt.c
index e73ff6a63bd9..daa4d581177a 100644
--- a/sys/dev/uart/uart_cpu_fdt.c
+++ b/sys/dev/uart/uart_cpu_fdt.c
@@ -70,53 +70,6 @@ uart_cpu_eqres(struct uart_bas *b1, struct uart_bas *b2)
return ((pmap_kextract(b1->bsh) == pmap_kextract(b2->bsh)) ? 1 : 0);
}
-static int
-phandle_chosen_propdev(phandle_t chosen, const char *name, phandle_t *node)
-{
- char buf[64];
-
- if (OF_getprop(chosen, name, buf, sizeof(buf)) <= 0)
- return (ENXIO);
- if ((*node = OF_finddevice(buf)) == -1)
- return (ENXIO);
-
- return (0);
-}
-
-static const struct ofw_compat_data *
-uart_fdt_find_compatible(phandle_t node, const struct ofw_compat_data *cd)
-{
- const struct ofw_compat_data *ocd;
-
- for (ocd = cd; ocd->ocd_str != NULL; ocd++) {
- if (fdt_is_compatible(node, ocd->ocd_str))
- return (ocd);
- }
- return (NULL);
-}
-
-static uintptr_t
-uart_fdt_find_by_node(phandle_t node, int class_list)
-{
- struct ofw_compat_data **cd;
- const struct ofw_compat_data *ocd;
-
- if (class_list) {
- SET_FOREACH(cd, uart_fdt_class_set) {
- ocd = uart_fdt_find_compatible(node, *cd);
- if ((ocd != NULL) && (ocd->ocd_data != 0))
- return (ocd->ocd_data);
- }
- } else {
- SET_FOREACH(cd, uart_fdt_class_and_device_set) {
- ocd = uart_fdt_find_compatible(node, *cd);
- if ((ocd != NULL) && (ocd->ocd_data != 0))
- return (ocd->ocd_data);
- }
- }
- return (0);
-}
-
int
uart_cpu_getdev(int devtype, struct uart_devinfo *di)
{
diff --git a/sys/dev/usb/controller/ehci_ixp4xx.c b/sys/dev/usb/controller/ehci_ixp4xx.c
index 05efbb51c129..1b2f6409d053 100644
--- a/sys/dev/usb/controller/ehci_ixp4xx.c
+++ b/sys/dev/usb/controller/ehci_ixp4xx.c
@@ -147,13 +147,13 @@ ehci_ixp_attach(device_t self)
isc->iot = rman_get_bustag(sc->sc_io_res);
isc->tag.bs_privdata = isc->iot;
/* read single */
- isc->tag.bs_r_1 = ehci_bs_r_1,
- isc->tag.bs_r_2 = ehci_bs_r_2,
- isc->tag.bs_r_4 = ehci_bs_r_4,
+ isc->tag.bs_r_1 = ehci_bs_r_1;
+ isc->tag.bs_r_2 = ehci_bs_r_2;
+ isc->tag.bs_r_4 = ehci_bs_r_4;
/* write (single) */
- isc->tag.bs_w_1 = ehci_bs_w_1,
- isc->tag.bs_w_2 = ehci_bs_w_2,
- isc->tag.bs_w_4 = ehci_bs_w_4,
+ isc->tag.bs_w_1 = ehci_bs_w_1;
+ isc->tag.bs_w_2 = ehci_bs_w_2;
+ isc->tag.bs_w_4 = ehci_bs_w_4;
sc->sc_io_tag = &isc->tag;
sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
diff --git a/sys/dev/usb/controller/generic_ehci.c b/sys/dev/usb/controller/generic_ehci.c
new file mode 100644
index 000000000000..fcae7dd41a99
--- /dev/null
+++ b/sys/dev/usb/controller/generic_ehci.c
@@ -0,0 +1,220 @@
+/*-
+ * Copyright (c) 2012 Ganbold Tsagaankhuu <ganbold@freebsd.org>
+ * Copyright (c) 2016 The FreeBSD Foundation
+ * All rights reserved.
+ *
+ * This software was developed by Andrew Turner under
+ * sponsorship from the FreeBSD Foundation.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ */
+
+/*
+ * Generic EHCI driver based on the Allwinner A10 EHCI driver
+ */
+
+#include <sys/cdefs.h>
+__FBSDID("$FreeBSD$");
+
+#include "opt_bus.h"
+
+#include <sys/param.h>
+#include <sys/systm.h>
+#include <sys/bus.h>
+#include <sys/rman.h>
+#include <sys/condvar.h>
+#include <sys/kernel.h>
+#include <sys/module.h>
+
+#include <machine/bus.h>
+
+#include <dev/usb/usb.h>
+#include <dev/usb/usbdi.h>
+
+#include <dev/usb/usb_core.h>
+#include <dev/usb/usb_busdma.h>
+#include <dev/usb/usb_process.h>
+#include <dev/usb/usb_util.h>
+
+#include <dev/usb/usb_controller.h>
+#include <dev/usb/usb_bus.h>
+#include <dev/usb/controller/ehci.h>
+#include <dev/usb/controller/ehcireg.h>
+
+#include <contrib/dev/acpica/include/acpi.h>
+#include <contrib/dev/acpica/include/accommon.h>
+#include <dev/acpica/acpivar.h>
+
+static device_attach_t generic_ehci_attach;
+static device_detach_t generic_ehci_detach;
+
+static int
+generic_ehci_probe(device_t self)
+{
+ ACPI_HANDLE h;
+
+ if ((h = acpi_get_handle(self)) == NULL ||
+ !acpi_MatchHid(h, "PNP0D20"))
+ return (ENXIO);
+
+ device_set_desc(self, "Generic EHCI Controller");
+ return (BUS_PROBE_DEFAULT);
+}
+
+static int
+generic_ehci_attach(device_t self)
+{
+ ehci_softc_t *sc = device_get_softc(self);
+ int err;
+ int rid;
+
+ /* initialise some bus fields */
+ sc->sc_bus.parent = self;
+ sc->sc_bus.devices = sc->sc_devices;
+ sc->sc_bus.devices_max = EHCI_MAX_DEVICES;
+ sc->sc_bus.dma_bits = 32;
+
+ /* get all DMA memory */
+ if (usb_bus_mem_alloc_all(&sc->sc_bus,
+ USB_GET_DMA_TAG(self), &ehci_iterate_hw_softc)) {
+ return (ENOMEM);
+ }
+
+ sc->sc_bus.usbrev = USB_REV_2_0;
+
+ rid = 0;
+ sc->sc_io_res = bus_alloc_resource_any(self, SYS_RES_MEMORY, &rid,
+ RF_ACTIVE);
+ if (!sc->sc_io_res) {
+ device_printf(self, "Could not map memory\n");
+ goto error;
+ }
+
+ sc->sc_io_tag = rman_get_bustag(sc->sc_io_res);
+ sc->sc_io_hdl = rman_get_bushandle(sc->sc_io_res);
+ sc->sc_io_size = rman_get_size(sc->sc_io_res);
+
+ rid = 0;
+ sc->sc_irq_res = bus_alloc_resource_any(self, SYS_RES_IRQ, &rid,
+ RF_SHAREABLE | RF_ACTIVE);
+ if (sc->sc_irq_res == NULL) {
+ device_printf(self, "Could not allocate irq\n");
+ goto error;
+ }
+ sc->sc_bus.bdev = device_add_child(self, "usbus", -1);
+ if (!sc->sc_bus.bdev) {
+ device_printf(self, "Could not add USB device\n");
+ goto error;
+ }
+ device_set_ivars(sc->sc_bus.bdev, &sc->sc_bus);
+
+ strlcpy(sc->sc_vendor, "Generic", sizeof(sc->sc_vendor));
+
+ err = bus_setup_intr(self, sc->sc_irq_res, INTR_TYPE_BIO | INTR_MPSAFE,
+ NULL, (driver_intr_t *)ehci_interrupt, sc, &sc->sc_intr_hdl);
+ if (err) {
+ device_printf(self, "Could not setup irq, %d\n", err);
+ sc->sc_intr_hdl = NULL;
+ goto error;
+ }
+
+ sc->sc_flags |= EHCI_SCFLG_DONTRESET;
+
+ err = ehci_init(sc);
+ if (!err)
+ err = device_probe_and_attach(sc->sc_bus.bdev);
+ if (err)
+ goto error;
+
+ return (0);
+
+error:
+ generic_ehci_detach(self);
+ return (ENXIO);
+}
+
+static int
+generic_ehci_detach(device_t self)
+{
+ ehci_softc_t *sc = device_get_softc(self);
+ device_t bdev;
+ int err;
+
+ if (sc->sc_bus.bdev) {
+ bdev = sc->sc_bus.bdev;
+ device_detach(bdev);
+ device_delete_child(self, bdev);
+ }
+ /* during module unload there are lots of children leftover */
+ device_delete_children(self);
+
+ if (sc->sc_irq_res && sc->sc_intr_hdl) {
+ /*
+ * only call ehci_detach() after ehci_init()
+ */
+ ehci_detach(sc);
+
+ err = bus_teardown_intr(self, sc->sc_irq_res, sc->sc_intr_hdl);
+
+ if (err)
+ /* XXX or should we panic? */
+ device_printf(self, "Could not tear down irq, %d\n",
+ err);
+ sc->sc_intr_hdl = NULL;
+ }
+
+ if (sc->sc_irq_res) {
+ bus_release_resource(self, SYS_RES_IRQ, 0, sc->sc_irq_res);
+ sc->sc_irq_res = NULL;
+ }
+ if (sc->sc_io_res) {
+ bus_release_resource(self, SYS_RES_MEMORY, 0,
+ sc->sc_io_res);
+ sc->sc_io_res = NULL;
+ }
+ usb_bus_mem_free_all(&sc->sc_bus, &ehci_iterate_hw_softc);
+
+ return (0);
+}
+
+static device_method_t ehci_methods[] = {
+ /* Device interface */
+ DEVMETHOD(device_probe, generic_ehci_probe),
+ DEVMETHOD(device_attach, generic_ehci_attach),
+ DEVMETHOD(device_detach, generic_ehci_detach),
+ DEVMETHOD(device_suspend, bus_generic_suspend),
+ DEVMETHOD(device_resume, bus_generic_resume),
+ DEVMETHOD(device_shutdown, bus_generic_shutdown),
+
+ DEVMETHOD_END
+};
+
+static driver_t ehci_driver = {
+ .name = "ehci",
+ .methods = ehci_methods,
+ .size = sizeof(ehci_softc_t),
+};
+
+static devclass_t ehci_devclass;
+
+DRIVER_MODULE(ehci, acpi, ehci_driver, ehci_devclass, 0, 0);
+MODULE_DEPEND(ehci, usb, 1, 1, 1);
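
generic_ehci_attach() uses the usual newbus unwind: every failure jumps to a single error label that calls the detach routine, and generic_ehci_detach() tests each member before releasing it, so it is safe on a half-initialized softc. The shape of the pattern, reduced to a sketch with a hypothetical foo driver:

    static int
    foo_attach(device_t dev)
    {
            struct foo_softc *sc = device_get_softc(dev);
            int rid = 0;

            sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
                RF_ACTIVE);
            if (sc->res == NULL)
                    goto error;
            /* ... further setup; any failure also jumps to error ... */
            return (0);
    error:
            foo_detach(dev);        /* must tolerate NULL members */
            return (ENXIO);
    }
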
diff --git a/sys/dev/usb/input/ukbd.c b/sys/dev/usb/input/ukbd.c
index 02a6ff37e8d4..42d96cfa64db 100644
--- a/sys/dev/usb/input/ukbd.c
+++ b/sys/dev/usb/input/ukbd.c
@@ -198,6 +198,7 @@ struct ukbd_softc {
int sc_mode; /* input mode (K_XLATE,K_RAW,K_CODE) */
int sc_state; /* shift/lock key state */
int sc_accents; /* accent key index (> 0) */
+ int sc_polling; /* polling recursion count */
int sc_led_size;
int sc_kbd_size;
@@ -1983,7 +1984,16 @@ ukbd_poll(keyboard_t *kbd, int on)
struct ukbd_softc *sc = kbd->kb_data;
UKBD_LOCK();
- if (on) {
+ /*
+ * Keep a reference count on polling to allow recursive
+ * cngrab() during a panic, for example.
+ */
+ if (on)
+ sc->sc_polling++;
+ else
+ sc->sc_polling--;
+
+ if (sc->sc_polling != 0) {
sc->sc_flags |= UKBD_FLAG_POLLING;
sc->sc_poll_thread = curthread;
} else {
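
ukbd_poll() now keeps a recursion count rather than a boolean, for the same reason as the syscons grab_level above: a nested cngrab() during a panic must not drop the keyboard out of polled mode when the inner grab releases. The body of the change, in sketch form:

    /* Sketch: polled mode persists until every nested "on" is balanced. */
    sc->sc_polling += on ? 1 : -1;
    if (sc->sc_polling != 0) {
            sc->sc_flags |= UKBD_FLAG_POLLING;
            sc->sc_poll_thread = curthread;
    } else {
            sc->sc_flags &= ~UKBD_FLAG_POLLING;
    }
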
diff --git a/sys/dev/usb/serial/u3g.c b/sys/dev/usb/serial/u3g.c
index 9988347243c4..0aa26bbffe0b 100644
--- a/sys/dev/usb/serial/u3g.c
+++ b/sys/dev/usb/serial/u3g.c
@@ -1100,6 +1100,7 @@ u3g_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
struct u3g_softc *sc = ucom->sc_parent;
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr[ucom->sc_subunit];
*msr = sc->sc_msr[ucom->sc_subunit];
}
diff --git a/sys/dev/usb/serial/uark.c b/sys/dev/usb/serial/uark.c
index e0e6d0bc9049..a1c8976494b4 100644
--- a/sys/dev/usb/serial/uark.c
+++ b/sys/dev/usb/serial/uark.c
@@ -427,6 +427,7 @@ uark_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
struct uark_softc *sc = ucom->sc_parent;
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/serial/ubsa.c b/sys/dev/usb/serial/ubsa.c
index 5fac56a27666..74159687e76c 100644
--- a/sys/dev/usb/serial/ubsa.c
+++ b/sys/dev/usb/serial/ubsa.c
@@ -650,11 +650,19 @@ ubsa_intr_callback(struct usb_xfer *xfer, usb_error_t error)
usbd_copy_out(pc, 0, buf, sizeof(buf));
/*
- * incidentally, Belkin adapter status bits match
- * UART 16550 bits
+ * MSR bits need translation from ns16550 to SER_* values.
+ * LSR bits are ns16550 in hardware and ucom.
*/
+ sc->sc_msr = 0;
+ if (buf[3] & UBSA_MSR_CTS)
+ sc->sc_msr |= SER_CTS;
+ if (buf[3] & UBSA_MSR_DCD)
+ sc->sc_msr |= SER_DCD;
+ if (buf[3] & UBSA_MSR_RI)
+ sc->sc_msr |= SER_RI;
+ if (buf[3] & UBSA_MSR_DSR)
+ sc->sc_msr |= SER_DSR;
sc->sc_lsr = buf[2];
- sc->sc_msr = buf[3];
DPRINTF("lsr = 0x%02x, msr = 0x%02x\n",
sc->sc_lsr, sc->sc_msr);
@@ -663,7 +671,7 @@ ubsa_intr_callback(struct usb_xfer *xfer, usb_error_t error)
} else {
DPRINTF("ignoring short packet, %d bytes\n", actlen);
}
-
+ /* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
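
ubsa above, and umct/umcs7840/umoscom below, each open-code the same translation of ns16550 modem-status bits to the SER_* values ucom expects. The ns16550 MSR layout is fixed (bits 4..7 are CTS/DSR/RI/DCD), so the mapping could live in a shared helper; a hypothetical sketch, not something this patch adds:

    #include <sys/types.h>
    #include <sys/serial.h>         /* SER_CTS, SER_DSR, SER_RI, SER_DCD */

    /* Hypothetical helper: translate ns16550 MSR bits to SER_* bits. */
    static uint8_t
    msr_to_ser(uint8_t msr)
    {
            uint8_t ser = 0;

            if (msr & 0x10)         /* CTS */
                    ser |= SER_CTS;
            if (msr & 0x20)         /* DSR */
                    ser |= SER_DSR;
            if (msr & 0x40)         /* RI */
                    ser |= SER_RI;
            if (msr & 0x80)         /* DCD */
                    ser |= SER_DCD;
            return (ser);
    }
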
diff --git a/sys/dev/usb/serial/uchcom.c b/sys/dev/usb/serial/uchcom.c
index d1063fd4f634..e3fdeedc8a23 100644
--- a/sys/dev/usb/serial/uchcom.c
+++ b/sys/dev/usb/serial/uchcom.c
@@ -625,6 +625,7 @@ uchcom_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
DPRINTF("\n");
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/serial/ufoma.c b/sys/dev/usb/serial/ufoma.c
index fb526cd57e6d..dfb41e1871d0 100644
--- a/sys/dev/usb/serial/ufoma.c
+++ b/sys/dev/usb/serial/ufoma.c
@@ -900,6 +900,7 @@ ufoma_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
struct ufoma_softc *sc = ucom->sc_parent;
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/serial/umcs.c b/sys/dev/usb/serial/umcs.c
index 6b55b5a8450b..c5ac0d6c52aa 100644
--- a/sys/dev/usb/serial/umcs.c
+++ b/sys/dev/usb/serial/umcs.c
@@ -743,15 +743,26 @@ umcs7840_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
{
struct umcs7840_softc *sc = ucom->sc_parent;
uint8_t pn = ucom->sc_portno;
- uint8_t hw_lsr = 0; /* local line status register */
uint8_t hw_msr = 0; /* local modem status register */
- /* Read LSR & MSR */
- umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_LSR, &hw_lsr);
+ /*
+ * Read status registers. MSR bits need translation from ns16550 to
+ * SER_* values. LSR bits are ns16550 in hardware and ucom.
+ */
+ umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_LSR, lsr);
umcs7840_get_UART_reg_sync(sc, pn, MCS7840_UART_REG_MSR, &hw_msr);
- *lsr = hw_lsr;
- *msr = hw_msr;
+ if (hw_msr & MCS7840_UART_MSR_NEGCTS)
+ *msr |= SER_CTS;
+
+ if (hw_msr & MCS7840_UART_MSR_NEGDCD)
+ *msr |= SER_DCD;
+
+ if (hw_msr & MCS7840_UART_MSR_NEGRI)
+ *msr |= SER_RI;
+
+ if (hw_msr & MCS7840_UART_MSR_NEGDSR)
+ *msr |= SER_DSR;
DPRINTF("Port %d status: LSR=%02x MSR=%02x\n", ucom->sc_portno, *lsr, *msr);
}
diff --git a/sys/dev/usb/serial/umct.c b/sys/dev/usb/serial/umct.c
index 32c789743723..251e0790fce7 100644
--- a/sys/dev/usb/serial/umct.c
+++ b/sys/dev/usb/serial/umct.c
@@ -86,6 +86,15 @@ __FBSDID("$FreeBSD$");
#define UMCT_SET_MCR 10 /* Set Modem Control Register */
#define UMCT_SET_MCR_SIZE 1
+#define UMCT_MSR_CTS_CHG 0x01
+#define UMCT_MSR_DSR_CHG 0x02
+#define UMCT_MSR_RI_CHG 0x04
+#define UMCT_MSR_CD_CHG 0x08
+#define UMCT_MSR_CTS 0x10
+#define UMCT_MSR_RTS 0x20
+#define UMCT_MSR_RI 0x40
+#define UMCT_MSR_CD 0x80
+
#define UMCT_INTR_INTERVAL 100
#define UMCT_IFACE_INDEX 0
#define UMCT_CONFIG_INDEX 0
@@ -384,11 +393,23 @@ umct_intr_callback_sub(struct usb_xfer *xfer, usb_error_t error)
pc = usbd_xfer_get_frame(xfer, 0);
usbd_copy_out(pc, 0, buf, sizeof(buf));
- sc->sc_msr = buf[0];
+ /*
+ * MSR bits need translation from ns16550 to SER_* values.
+ * LSR bits are ns16550 in hardware and ucom.
+ */
+ sc->sc_msr = 0;
+ if (buf[0] & UMCT_MSR_CTS)
+ sc->sc_msr |= SER_CTS;
+ if (buf[0] & UMCT_MSR_CD)
+ sc->sc_msr |= SER_DCD;
+ if (buf[0] & UMCT_MSR_RI)
+ sc->sc_msr |= SER_RI;
+ if (buf[0] & UMCT_MSR_RTS)
+ sc->sc_msr |= SER_DSR;
sc->sc_lsr = buf[1];
ucom_status_change(&sc->sc_ucom);
-
+ /* FALLTHROUGH */
case USB_ST_SETUP:
tr_setup:
usbd_xfer_set_frame_len(xfer, 0, usbd_xfer_max_len(xfer));
diff --git a/sys/dev/usb/serial/umodem.c b/sys/dev/usb/serial/umodem.c
index 5e65c12e1cb7..b9f798b479ae 100644
--- a/sys/dev/usb/serial/umodem.c
+++ b/sys/dev/usb/serial/umodem.c
@@ -558,6 +558,7 @@ umodem_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
DPRINTF("\n");
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/serial/umoscom.c b/sys/dev/usb/serial/umoscom.c
index 8c580dabb59c..daa83c54feab 100644
--- a/sys/dev/usb/serial/umoscom.c
+++ b/sys/dev/usb/serial/umoscom.c
@@ -523,14 +523,16 @@ static void
umoscom_cfg_get_status(struct ucom_softc *ucom, uint8_t *p_lsr, uint8_t *p_msr)
{
struct umoscom_softc *sc = ucom->sc_parent;
- uint8_t lsr;
uint8_t msr;
DPRINTFN(5, "\n");
- /* read status registers */
+ /*
+ * Read status registers. MSR bits need translation from ns16550 to
+ * SER_* values. LSR bits are ns16550 in hardware and ucom.
+ */
- lsr = umoscom_cfg_read(sc, UMOSCOM_LSR);
+ *p_lsr = umoscom_cfg_read(sc, UMOSCOM_LSR);
msr = umoscom_cfg_read(sc, UMOSCOM_MSR);
/* translate bits */
diff --git a/sys/dev/usb/serial/uplcom.c b/sys/dev/usb/serial/uplcom.c
index 6e238758fa26..dfc0933d4f09 100644
--- a/sys/dev/usb/serial/uplcom.c
+++ b/sys/dev/usb/serial/uplcom.c
@@ -807,6 +807,7 @@ uplcom_cfg_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
DPRINTF("\n");
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/serial/uslcom.c b/sys/dev/usb/serial/uslcom.c
index b516bdec3641..063f715eb8f3 100644
--- a/sys/dev/usb/serial/uslcom.c
+++ b/sys/dev/usb/serial/uslcom.c
@@ -704,6 +704,7 @@ uslcom_get_status(struct ucom_softc *ucom, uint8_t *lsr, uint8_t *msr)
DPRINTF("\n");
+ /* XXX Note: sc_lsr is always zero */
*lsr = sc->sc_lsr;
*msr = sc->sc_msr;
}
diff --git a/sys/dev/usb/template/usb_template_mtp.c b/sys/dev/usb/template/usb_template_mtp.c
index f0528a52aa89..d428e992f062 100644
--- a/sys/dev/usb/template/usb_template_mtp.c
+++ b/sys/dev/usb/template/usb_template_mtp.c
@@ -26,7 +26,7 @@
*/
/*
- * This file contains the USB templates for an USB Message Transfer
+ * This file contains the USB templates for a USB Media Transfer
* Protocol device.
*
* NOTE: It is common practice that MTP devices use some dummy
diff --git a/sys/dev/usb/usb_device.c b/sys/dev/usb/usb_device.c
index a5c2444ec859..c1b21df58119 100644
--- a/sys/dev/usb/usb_device.c
+++ b/sys/dev/usb/usb_device.c
@@ -1724,8 +1724,8 @@ usb_alloc_device(device_t parent_dev, struct usb_bus *bus,
/* Setup USB descriptors */
err = (usb_temp_setup_by_index_p) (udev, usb_template);
if (err) {
- DPRINTFN(0, "setting up USB template failed maybe the USB "
- "template module has not been loaded\n");
+ DPRINTFN(0, "setting up USB template failed - "
+ "usb_template(4) not loaded?\n");
goto done;
}
}
diff --git a/sys/dev/usb/usbdevs b/sys/dev/usb/usbdevs
index 44d93212bfeb..5cd917476fb5 100644
--- a/sys/dev/usb/usbdevs
+++ b/sys/dev/usb/usbdevs
@@ -2729,6 +2729,7 @@ product LOGITECH BB13 0xc401 USB-PS/2 Trackball
product LOGITECH RK53 0xc501 Cordless mouse
product LOGITECH RB6 0xc503 Cordless keyboard
product LOGITECH MX700 0xc506 Cordless optical mouse
+product LOGITECH UNIFYING 0xc52b Logitech Unifying Receiver
product LOGITECH QUICKCAMPRO2 0xd001 QuickCam Pro
/* Logitec Corp. products */
diff --git a/sys/dev/virtio/network/if_vtnet.c b/sys/dev/virtio/network/if_vtnet.c
index 6b89b869242e..37189753012e 100644
--- a/sys/dev/virtio/network/if_vtnet.c
+++ b/sys/dev/virtio/network/if_vtnet.c
@@ -230,18 +230,32 @@ static void vtnet_disable_interrupts(struct vtnet_softc *);
static int vtnet_tunable_int(struct vtnet_softc *, const char *, int);
/* Tunables. */
+static SYSCTL_NODE(_hw, OID_AUTO, vtnet, CTLFLAG_RD, 0, "VTNET driver parameters");
static int vtnet_csum_disable = 0;
TUNABLE_INT("hw.vtnet.csum_disable", &vtnet_csum_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, csum_disable, CTLFLAG_RDTUN,
+ &vtnet_csum_disable, 0, "Disables receive and send checksum offload");
static int vtnet_tso_disable = 0;
TUNABLE_INT("hw.vtnet.tso_disable", &vtnet_tso_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, tso_disable, CTLFLAG_RDTUN, &vtnet_tso_disable,
+ 0, "Disables TCP Segmentation Offload");
static int vtnet_lro_disable = 0;
TUNABLE_INT("hw.vtnet.lro_disable", &vtnet_lro_disable);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, lro_disable, CTLFLAG_RDTUN, &vtnet_lro_disable,
+ 0, "Disables TCP Large Receive Offload");
static int vtnet_mq_disable = 0;
TUNABLE_INT("hw.vtnet.mq_disable", &vtnet_mq_disable);
-static int vtnet_mq_max_pairs = 0;
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_disable, CTLFLAG_RDTUN, &vtnet_mq_disable,
+ 0, "Disables Multi Queue support");
+static int vtnet_mq_max_pairs = VTNET_MAX_QUEUE_PAIRS;
TUNABLE_INT("hw.vtnet.mq_max_pairs", &vtnet_mq_max_pairs);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, mq_max_pairs, CTLFLAG_RDTUN,
+ &vtnet_mq_max_pairs, 0, "Sets the maximum number of Multi Queue pairs");
static int vtnet_rx_process_limit = 512;
TUNABLE_INT("hw.vtnet.rx_process_limit", &vtnet_rx_process_limit);
+SYSCTL_INT(_hw_vtnet, OID_AUTO, rx_process_limit, CTLFLAG_RDTUN,
+ &vtnet_rx_process_limit, 0,
+ "Limits the number RX segments processed in a single pass");
static uma_zone_t vtnet_tx_header_zone;
@@ -597,7 +611,6 @@ static void
vtnet_setup_features(struct vtnet_softc *sc)
{
device_t dev;
- int max_pairs, max;
dev = sc->vtnet_dev;
@@ -646,32 +659,31 @@ vtnet_setup_features(struct vtnet_softc *sc)
if (virtio_with_feature(dev, VIRTIO_NET_F_MQ) &&
sc->vtnet_flags & VTNET_FLAG_CTRL_VQ) {
- max_pairs = virtio_read_dev_config_2(dev,
+ sc->vtnet_max_vq_pairs = virtio_read_dev_config_2(dev,
offsetof(struct virtio_net_config, max_virtqueue_pairs));
- if (max_pairs < VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN ||
- max_pairs > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
- max_pairs = 1;
} else
- max_pairs = 1;
+ sc->vtnet_max_vq_pairs = 1;
- if (max_pairs > 1) {
+ if (sc->vtnet_max_vq_pairs > 1) {
/*
- * Limit the maximum number of queue pairs to the number of
- * CPUs or the configured maximum. The actual number of
- * queues that get used may be less.
+ * Limit the maximum number of queue pairs to the lower of
+ * the number of CPUs and the configured maximum.
+ * The actual number of queues that get used may be less.
*/
+ int max;
+
max = vtnet_tunable_int(sc, "mq_max_pairs", vtnet_mq_max_pairs);
- if (max > 0 && max_pairs > max)
- max_pairs = max;
- if (max_pairs > mp_ncpus)
- max_pairs = mp_ncpus;
- if (max_pairs > VTNET_MAX_QUEUE_PAIRS)
- max_pairs = VTNET_MAX_QUEUE_PAIRS;
- if (max_pairs > 1)
- sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
+ if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN) {
+ if (max > mp_ncpus)
+ max = mp_ncpus;
+ if (max > VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX)
+ max = VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX;
+ if (max > 1) {
+ sc->vtnet_requested_vq_pairs = max;
+ sc->vtnet_flags |= VTNET_FLAG_MULTIQ;
+ }
+ }
}
-
- sc->vtnet_max_vq_pairs = max_pairs;
}
static int
@@ -2982,13 +2994,11 @@ vtnet_set_active_vq_pairs(struct vtnet_softc *sc)
dev = sc->vtnet_dev;
if ((sc->vtnet_flags & VTNET_FLAG_MULTIQ) == 0) {
- MPASS(sc->vtnet_max_vq_pairs == 1);
sc->vtnet_act_vq_pairs = 1;
return;
}
- /* BMV: Just use the maximum configured for now. */
- npairs = sc->vtnet_max_vq_pairs;
+ npairs = sc->vtnet_requested_vq_pairs;
if (vtnet_ctrl_mq_cmd(sc, npairs) != 0) {
device_printf(dev,
@@ -3852,6 +3862,9 @@ vtnet_setup_sysctl(struct vtnet_softc *sc)
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "max_vq_pairs",
CTLFLAG_RD, &sc->vtnet_max_vq_pairs, 0,
"Maximum number of supported virtqueue pairs");
+ SYSCTL_ADD_INT(ctx, child, OID_AUTO, "requested_vq_pairs",
+ CTLFLAG_RD, &sc->vtnet_requested_vq_pairs, 0,
+ "Requested number of virtqueue pairs");
SYSCTL_ADD_INT(ctx, child, OID_AUTO, "act_vq_pairs",
CTLFLAG_RD, &sc->vtnet_act_vq_pairs, 0,
"Number of active virtqueue pairs");
diff --git a/sys/dev/virtio/network/if_vtnetvar.h b/sys/dev/virtio/network/if_vtnetvar.h
index f89f6b11fb21..15436d983ca9 100644
--- a/sys/dev/virtio/network/if_vtnetvar.h
+++ b/sys/dev/virtio/network/if_vtnetvar.h
@@ -155,6 +155,7 @@ struct vtnet_softc {
int vtnet_if_flags;
int vtnet_act_vq_pairs;
int vtnet_max_vq_pairs;
+ int vtnet_requested_vq_pairs;
struct virtqueue *vtnet_ctrl_vq;
struct vtnet_mac_filter *vtnet_mac_filter;
diff --git a/sys/dev/vt/vt_core.c b/sys/dev/vt/vt_core.c
index 32ac08048fea..e09e8a786b3c 100644
--- a/sys/dev/vt/vt_core.c
+++ b/sys/dev/vt/vt_core.c
@@ -2228,9 +2228,11 @@ skip_thunk:
return (EINVAL);
if (vw == vd->vd_curwindow) {
+ mtx_lock(&Giant);
kbd = kbd_get_keyboard(vd->vd_keyboard);
if (kbd != NULL)
vt_save_kbd_state(vw, kbd);
+ mtx_unlock(&Giant);
}
vi->m_num = vd->vd_curwindow->vw_number + 1;
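
The keyboard array that kbd_get_keyboard() walks is still protected by Giant in the legacy kbd(4) subsystem, so the lookup and the vt_save_kbd_state() call that uses its result must be bracketed together; otherwise the keyboard could be detached between the two calls. The bracketing pattern, annotated:

    mtx_lock(&Giant);
    kbd = kbd_get_keyboard(vd->vd_keyboard);    /* Giant-protected lookup */
    if (kbd != NULL)
            vt_save_kbd_state(vw, kbd);         /* use result before unlock */
    mtx_unlock(&Giant);
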
diff --git a/sys/dev/xen/netfront/netfront.c b/sys/dev/xen/netfront/netfront.c
index 3d02a52d0ac0..a68bc9675da5 100644
--- a/sys/dev/xen/netfront/netfront.c
+++ b/sys/dev/xen/netfront/netfront.c
@@ -1760,7 +1760,7 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
#ifdef INET
struct ifaddr *ifa = (struct ifaddr *)data;
#endif
- int mask, error = 0;
+ int mask, error = 0, reinit;
dev = sc->xbdev;
@@ -1809,41 +1809,36 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
break;
case SIOCSIFCAP:
mask = ifr->ifr_reqcap ^ ifp->if_capenable;
+ reinit = 0;
+
if (mask & IFCAP_TXCSUM) {
- if (IFCAP_TXCSUM & ifp->if_capenable) {
- ifp->if_capenable &= ~(IFCAP_TXCSUM|IFCAP_TSO4);
- ifp->if_hwassist &= ~(CSUM_TCP | CSUM_UDP
- | CSUM_IP | CSUM_TSO);
- } else {
- ifp->if_capenable |= IFCAP_TXCSUM;
- ifp->if_hwassist |= (CSUM_TCP | CSUM_UDP
- | CSUM_IP);
- }
- }
- if (mask & IFCAP_RXCSUM) {
- ifp->if_capenable ^= IFCAP_RXCSUM;
+ ifp->if_capenable ^= IFCAP_TXCSUM;
+ ifp->if_hwassist ^= XN_CSUM_FEATURES;
}
if (mask & IFCAP_TSO4) {
- if (IFCAP_TSO4 & ifp->if_capenable) {
- ifp->if_capenable &= ~IFCAP_TSO4;
- ifp->if_hwassist &= ~CSUM_TSO;
- } else if (IFCAP_TXCSUM & ifp->if_capenable) {
- ifp->if_capenable |= IFCAP_TSO4;
- ifp->if_hwassist |= CSUM_TSO;
- } else {
- IPRINTK("Xen requires tx checksum offload"
- " be enabled to use TSO\n");
- error = EINVAL;
- }
+ ifp->if_capenable ^= IFCAP_TSO4;
+ ifp->if_hwassist ^= CSUM_TSO;
}
- if (mask & IFCAP_LRO) {
- ifp->if_capenable ^= IFCAP_LRO;
+ if (mask & (IFCAP_RXCSUM | IFCAP_LRO)) {
+ /* These Rx features require us to renegotiate. */
+ reinit = 1;
+
+ if (mask & IFCAP_RXCSUM)
+ ifp->if_capenable ^= IFCAP_RXCSUM;
+ if (mask & IFCAP_LRO)
+ ifp->if_capenable ^= IFCAP_LRO;
}
+
+ if (reinit == 0)
+ break;
+
/*
* We must reset the interface so the backend picks up the
* new features.
*/
+ device_printf(sc->xbdev,
+ "performing interface reset due to feature change\n");
XN_LOCK(sc);
netfront_carrier_off(sc);
sc->xn_reset = true;
@@ -1865,6 +1860,13 @@ xn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
xs_rm(XST_NIL, xenbus_get_node(dev), "feature-gso-tcpv4");
xs_rm(XST_NIL, xenbus_get_node(dev), "feature-no-csum-offload");
xenbus_set_state(dev, XenbusStateClosing);
+
+ /*
+ * Wait for the frontend to reconnect before returning
+ * from the ioctl. 30s should be more than enough for any
+ * sane backend to reconnect.
+ */
+ error = tsleep(sc, 0, "xn_rst", 30*hz);
break;
case SIOCADDMULTI:
case SIOCDELMULTI:
@@ -1971,6 +1973,7 @@ xn_connect(struct netfront_info *np)
* packets.
*/
netfront_carrier_on(np);
+ wakeup(np);
return (0);
}
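
The reset path in xn_ioctl() and the wakeup added to xn_connect() above form a sleep/wakeup handshake on the softc address: the ioctl thread parks in tsleep() until the frontend has renegotiated with the backend, and tsleep() returns EWOULDBLOCK if the 30 second timeout expires first. In outline, with a hypothetical trigger helper (xn_connect() refers to the same softc as np):

    /* ioctl side: request the reset, then wait for reconnection. */
    sc->xn_reset = true;
    trigger_reconnect(sc);                      /* hypothetical */
    error = tsleep(sc, 0, "xn_rst", 30 * hz);   /* EWOULDBLOCK on timeout */

    /* xn_connect() side, once the backend is attached again: */
    netfront_carrier_on(np);
    wakeup(np);                                 /* releases the ioctl */
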
@@ -2085,7 +2088,7 @@ xn_configure_features(struct netfront_info *np)
#endif
if ((ifp->if_capabilities & cap_enabled & IFCAP_TXCSUM) != 0) {
ifp->if_capenable |= IFCAP_TXCSUM;
- ifp->if_hwassist |= CSUM_TCP|CSUM_UDP;
+ ifp->if_hwassist |= XN_CSUM_FEATURES;
}
if ((ifp->if_capabilities & cap_enabled & IFCAP_RXCSUM) != 0)
ifp->if_capenable |= IFCAP_RXCSUM;
@@ -2157,6 +2160,9 @@ xn_txq_mq_start(struct ifnet *ifp, struct mbuf *m)
np = ifp->if_softc;
npairs = np->num_queues;
+ if (!netfront_carrier_ok(np))
+ return (ENOBUFS);
+
KASSERT(npairs != 0, ("called with 0 available queues"));
/* check if flowid is set */