From 9dda5c8ffc7466f940ed2ae0b056c9ae39e3208a Mon Sep 17 00:00:00 2001 From: Pyun YongHyeon Date: Thu, 18 Feb 2016 01:24:10 +0000 Subject: Fix variable assignment. Found by: PVS-Studio --- sys/dev/age/if_age.c | 2 +- sys/dev/alc/if_alc.c | 2 +- sys/dev/ale/if_ale.c | 2 +- sys/dev/jme/if_jme.c | 2 +- sys/dev/msk/if_msk.c | 2 +- sys/dev/stge/if_stge.c | 2 +- sys/dev/vte/if_vte.c | 2 +- 7 files changed, 7 insertions(+), 7 deletions(-) diff --git a/sys/dev/age/if_age.c b/sys/dev/age/if_age.c index c3a896203c1f..9e2fec80b0ad 100644 --- a/sys/dev/age/if_age.c +++ b/sys/dev/age/if_age.c @@ -588,7 +588,7 @@ age_attach(device_t dev) /* Create device sysctl node. */ age_sysctl_node(sc); - if ((error = age_dma_alloc(sc) != 0)) + if ((error = age_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ diff --git a/sys/dev/alc/if_alc.c b/sys/dev/alc/if_alc.c index d730eee0b615..5bfd60cfa46b 100644 --- a/sys/dev/alc/if_alc.c +++ b/sys/dev/alc/if_alc.c @@ -1532,7 +1532,7 @@ alc_attach(device_t dev) /* Create device sysctl node. */ alc_sysctl_node(sc); - if ((error = alc_dma_alloc(sc) != 0)) + if ((error = alc_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ diff --git a/sys/dev/ale/if_ale.c b/sys/dev/ale/if_ale.c index 3a728f4ad9ac..e4db2fa4ab62 100644 --- a/sys/dev/ale/if_ale.c +++ b/sys/dev/ale/if_ale.c @@ -603,7 +603,7 @@ ale_attach(device_t dev) /* Create device sysctl node. */ ale_sysctl_node(sc); - if ((error = ale_dma_alloc(sc) != 0)) + if ((error = ale_dma_alloc(sc)) != 0) goto fail; /* Load station address. */ diff --git a/sys/dev/jme/if_jme.c b/sys/dev/jme/if_jme.c index 35b25265c979..d3a1057b0303 100644 --- a/sys/dev/jme/if_jme.c +++ b/sys/dev/jme/if_jme.c @@ -804,7 +804,7 @@ jme_attach(device_t dev) } /* Create coalescing sysctl node. 
*/ jme_sysctl_node(sc); - if ((error = jme_dma_alloc(sc) != 0)) + if ((error = jme_dma_alloc(sc)) != 0) goto fail; ifp = sc->jme_ifp = if_alloc(IFT_ETHER); diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c index 90474e4e411b..cc6d8e45ac2e 100644 --- a/sys/dev/msk/if_msk.c +++ b/sys/dev/msk/if_msk.c @@ -1623,7 +1623,7 @@ msk_attach(device_t dev) callout_init_mtx(&sc_if->msk_tick_ch, &sc_if->msk_softc->msk_mtx, 0); msk_sysctl_node(sc_if); - if ((error = msk_txrx_dma_alloc(sc_if) != 0)) + if ((error = msk_txrx_dma_alloc(sc_if)) != 0) goto fail; msk_rx_dma_jalloc(sc_if); diff --git a/sys/dev/stge/if_stge.c b/sys/dev/stge/if_stge.c index a93dda7b0e8b..0164eb83af37 100644 --- a/sys/dev/stge/if_stge.c +++ b/sys/dev/stge/if_stge.c @@ -508,7 +508,7 @@ stge_attach(device_t dev) } } - if ((error = stge_dma_alloc(sc) != 0)) + if ((error = stge_dma_alloc(sc)) != 0) goto fail; /* diff --git a/sys/dev/vte/if_vte.c b/sys/dev/vte/if_vte.c index e10b98829144..d3eb06a6ba38 100644 --- a/sys/dev/vte/if_vte.c +++ b/sys/dev/vte/if_vte.c @@ -428,7 +428,7 @@ vte_attach(device_t dev) /* Reset the ethernet controller. */ vte_reset(sc); - if ((error = vte_dma_alloc(sc) != 0)) + if ((error = vte_dma_alloc(sc)) != 0) goto fail; /* Create device sysctl node. */ -- cgit v1.2.3 From 90cf5d3043e7b9190328dc9d22303f28d37a0089 Mon Sep 17 00:00:00 2001 From: Pyun YongHyeon Date: Thu, 18 Feb 2016 01:30:49 +0000 Subject: Remove duplicated check. 
Found by: PVS-Studio --- sys/dev/rl/if_rl.c | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c index 9d0d7aa31d94..bc23125e5e29 100644 --- a/sys/dev/rl/if_rl.c +++ b/sys/dev/rl/if_rl.c @@ -1938,18 +1938,15 @@ rl_stop(struct rl_softc *sc) */ for (i = 0; i < RL_TX_LIST_CNT; i++) { if (sc->rl_cdata.rl_tx_chain[i] != NULL) { - if (sc->rl_cdata.rl_tx_chain[i] != NULL) { - bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, - sc->rl_cdata.rl_tx_dmamap[i], - BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, - sc->rl_cdata.rl_tx_dmamap[i]); - m_freem(sc->rl_cdata.rl_tx_chain[i]); - sc->rl_cdata.rl_tx_chain[i] = NULL; - } - CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), - 0x0000000); + bus_dmamap_sync(sc->rl_cdata.rl_tx_tag, + sc->rl_cdata.rl_tx_dmamap[i], + BUS_DMASYNC_POSTWRITE); + bus_dmamap_unload(sc->rl_cdata.rl_tx_tag, + sc->rl_cdata.rl_tx_dmamap[i]); + m_freem(sc->rl_cdata.rl_tx_chain[i]); + sc->rl_cdata.rl_tx_chain[i] = NULL; } + CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } } -- cgit v1.2.3 From 0386a98bd06247cdcc595354cf1b664b300aa3a0 Mon Sep 17 00:00:00 2001 From: Mark Johnston Date: Thu, 18 Feb 2016 01:58:26 +0000 Subject: Use the _SAFE loop variant, since the loop body may remove queue entries. 
PR: 207146 MFC after: 3 days --- usr.sbin/rtsold/rtsold.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/usr.sbin/rtsold/rtsold.c b/usr.sbin/rtsold/rtsold.c index 1482798460df..9cfe5c304349 100644 --- a/usr.sbin/rtsold/rtsold.c +++ b/usr.sbin/rtsold/rtsold.c @@ -554,7 +554,7 @@ rtsol_check_timer(void) struct timespec now, rtsol_timer; struct ifinfo *ifi; struct rainfo *rai; - struct ra_opt *rao; + struct ra_opt *rao, *raotmp; int flags; clock_gettime(CLOCK_MONOTONIC_FAST, &now); @@ -649,7 +649,8 @@ rtsol_check_timer(void) int expire = 0; TAILQ_FOREACH(rai, &ifi->ifi_rainfo, rai_next) { - TAILQ_FOREACH(rao, &rai->rai_ra_opt, rao_next) { + TAILQ_FOREACH_SAFE(rao, &rai->rai_ra_opt, + rao_next, raotmp) { warnmsg(LOG_DEBUG, __func__, "RA expiration timer: " "type=%d, msg=%s, expire=%s", -- cgit v1.2.3 From 4a1ff07b39d4ddc113a10e401540ff5323875ef5 Mon Sep 17 00:00:00 2001 From: Pyun YongHyeon Date: Thu, 18 Feb 2016 03:05:08 +0000 Subject: Fix a bug introduced in r295736 TX descriptor address should be updated for valid chain. Pointed out by: jmallett --- sys/dev/rl/if_rl.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sys/dev/rl/if_rl.c b/sys/dev/rl/if_rl.c index bc23125e5e29..d9b79396b7c4 100644 --- a/sys/dev/rl/if_rl.c +++ b/sys/dev/rl/if_rl.c @@ -1945,8 +1945,9 @@ rl_stop(struct rl_softc *sc) sc->rl_cdata.rl_tx_dmamap[i]); m_freem(sc->rl_cdata.rl_tx_chain[i]); sc->rl_cdata.rl_tx_chain[i] = NULL; + CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), + 0x0000000); } - CSR_WRITE_4(sc, RL_TXADDR0 + (i * sizeof(uint32_t)), 0x0000000); } } -- cgit v1.2.3 From 7ae3d4bf547fbeb9dda13fb8136ad92ae909ae21 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 04:58:34 +0000 Subject: tcp/lro: Allow drivers to set the TCP ACK/data segment aggregation limit ACK aggregation limit is append count based, while the TCP data segment aggregation limit is length based. 
Unless the network driver sets these two limits, it's an NO-OP. Reviewed by: adrian, gallatin (previous version), hselasky (previous version) Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5185 --- sys/netinet/tcp_lro.c | 15 +++++++++++++-- sys/netinet/tcp_lro.h | 5 +++++ sys/sys/param.h | 2 +- 3 files changed, 19 insertions(+), 3 deletions(-) diff --git a/sys/netinet/tcp_lro.c b/sys/netinet/tcp_lro.c index 62d8595c3f70..7067abe6840f 100644 --- a/sys/netinet/tcp_lro.c +++ b/sys/netinet/tcp_lro.c @@ -88,6 +88,8 @@ tcp_lro_init_args(struct lro_ctrl *lc, struct ifnet *ifp, lc->lro_mbuf_count = 0; lc->lro_mbuf_max = lro_mbufs; lc->lro_cnt = lro_entries; + lc->lro_ackcnt_lim = TCP_LRO_ACKCNT_MAX; + lc->lro_length_lim = TCP_LRO_LENGTH_MAX; lc->ifp = ifp; SLIST_INIT(&lc->lro_free); SLIST_INIT(&lc->lro_active); @@ -610,7 +612,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum) } /* Flush now if appending will result in overflow. */ - if (le->p_len > (65535 - tcp_data_len)) { + if (le->p_len > (lc->lro_length_lim - tcp_data_len)) { SLIST_REMOVE(&lc->lro_active, le, lro_entry, next); tcp_lro_flush(lc, le); break; @@ -648,6 +650,15 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum) if (tcp_data_len == 0) { m_freem(m); + /* + * Flush this LRO entry, if this ACK should not + * be further delayed. + */ + if (le->append_cnt >= lc->lro_ackcnt_lim) { + SLIST_REMOVE(&lc->lro_active, le, lro_entry, + next); + tcp_lro_flush(lc, le); + } return (0); } @@ -668,7 +679,7 @@ tcp_lro_rx(struct lro_ctrl *lc, struct mbuf *m, uint32_t csum) * If a possible next full length packet would cause an * overflow, pro-actively flush now. 
*/ - if (le->p_len > (65535 - lc->ifp->if_mtu)) { + if (le->p_len > (lc->lro_length_lim - lc->ifp->if_mtu)) { SLIST_REMOVE(&lc->lro_active, le, lro_entry, next); tcp_lro_flush(lc, le); } else diff --git a/sys/netinet/tcp_lro.h b/sys/netinet/tcp_lro.h index 48c679dec340..3fc627c31500 100644 --- a/sys/netinet/tcp_lro.h +++ b/sys/netinet/tcp_lro.h @@ -91,11 +91,16 @@ struct lro_ctrl { unsigned lro_cnt; unsigned lro_mbuf_count; unsigned lro_mbuf_max; + unsigned short lro_ackcnt_lim; /* max # of aggregated ACKs */ + unsigned lro_length_lim; /* max len of aggregated data */ struct lro_head lro_active; struct lro_head lro_free; }; +#define TCP_LRO_LENGTH_MAX 65535 +#define TCP_LRO_ACKCNT_MAX 65535 /* unlimited */ + int tcp_lro_init(struct lro_ctrl *); int tcp_lro_init_args(struct lro_ctrl *, struct ifnet *, unsigned, unsigned); void tcp_lro_free(struct lro_ctrl *); diff --git a/sys/sys/param.h b/sys/sys/param.h index bb53a93170f3..f9b4b280d2bd 100644 --- a/sys/sys/param.h +++ b/sys/sys/param.h @@ -58,7 +58,7 @@ * in the range 5 to 9. */ #undef __FreeBSD_version -#define __FreeBSD_version 1100098 /* Master, propagated to newvers */ +#define __FreeBSD_version 1100099 /* Master, propagated to newvers */ /* * __FreeBSD_kernel__ indicates that this system uses the kernel of FreeBSD, -- cgit v1.2.3 From aa7f74113e4de278e1742a6564fae6a49c4f1d34 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 04:59:37 +0000 Subject: hyperv/hn: Set the TCP ACK/data segment aggregation limit Set TCP ACK append limit to 1, i.e. aggregate 2 ACKs at most. Aggregating anything more than 2 hurts TCP sending performance in hyperv. This significantly improves the TCP sending performance when the number of concurrent connetion is low (2~8). And it greatly stabilizes the TCP sending performance in other cases. Set TCP data segments aggregation length limit to 37500. 
Without this limitation, hn(4) could aggregate ~45 TCP data segments for each connection (even at 64 or more connections) before dispatching them to socket code; large aggregation slows down ACK sending and eventually hurts/destabilizes TCP reception performance. This setting stabilizes and improves TCP reception performance for >4 concurrent connections significantly. Make them sysctls so they could be adjusted. Reviewed by: adrian, gallatin (previous version), hselasky (previous version) Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5185 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 1 - sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 94 +++++++++++++++------------ 2 files changed, 53 insertions(+), 42 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index 4f52e0d7c7f9..f0b1c26199c7 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -1030,7 +1030,6 @@ typedef struct hn_softc { struct task hn_txeof_task; struct lro_ctrl hn_lro; - int hn_lro_hiwat; /* Trust csum verification on host side */ int hn_trust_hcsum; /* HN_TRUST_HCSUM_ */ diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index b2a207905226..dea07797326c 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -176,14 +176,11 @@ struct hn_txdesc { #define HN_CSUM_ASSIST_WIN8 (CSUM_TCP) #define HN_CSUM_ASSIST (CSUM_IP | CSUM_UDP | CSUM_TCP) -/* XXX move to netinet/tcp_lro.h */ -#define HN_LRO_HIWAT_MAX 65535 -#define HN_LRO_HIWAT_DEF HN_LRO_HIWAT_MAX +#define HN_LRO_LENLIM_DEF (25 * ETHERMTU) /* YYY 2*MTU is a bit rough, but should be good enough. 
*/ -#define HN_LRO_HIWAT_MTULIM(ifp) (2 * (ifp)->if_mtu) -#define HN_LRO_HIWAT_ISVALID(sc, hiwat) \ - ((hiwat) >= HN_LRO_HIWAT_MTULIM((sc)->hn_ifp) || \ - (hiwat) <= HN_LRO_HIWAT_MAX) +#define HN_LRO_LENLIM_MIN(ifp) (2 * (ifp)->if_mtu) + +#define HN_LRO_ACKCNT_DEF 1 /* * Be aware that this sleepable mutex will exhibit WITNESS errors when @@ -253,9 +250,8 @@ static void hn_start(struct ifnet *ifp); static void hn_start_txeof(struct ifnet *ifp); static int hn_ifmedia_upd(struct ifnet *ifp); static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); -#ifdef HN_LRO_HIWAT -static int hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS); -#endif +static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); static int hn_check_iplen(const struct mbuf *, int); @@ -265,15 +261,6 @@ static void hn_start_taskfunc(void *xsc, int pending); static void hn_txeof_taskfunc(void *xsc, int pending); static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **); -static __inline void -hn_set_lro_hiwat(struct hn_softc *sc, int hiwat) -{ - sc->hn_lro_hiwat = hiwat; -#ifdef HN_LRO_HIWAT - sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat; -#endif -} - static int hn_ifmedia_upd(struct ifnet *ifp __unused) { @@ -358,7 +345,6 @@ netvsc_attach(device_t dev) bzero(sc, sizeof(hn_softc_t)); sc->hn_unit = unit; sc->hn_dev = dev; - sc->hn_lro_hiwat = HN_LRO_HIWAT_DEF; sc->hn_direct_tx_size = hn_direct_tx_size; if (hn_trust_hosttcp) sc->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; @@ -442,9 +428,8 @@ netvsc_attach(device_t dev) /* Driver private LRO settings */ sc->hn_lro.ifp = ifp; #endif -#ifdef HN_LRO_HIWAT - sc->hn_lro.lro_hiwat = sc->hn_lro_hiwat; -#endif + sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; + sc->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; #endif /* INET || INET6 */ #if __FreeBSD_version >= 1100045 @@ -480,11 +465,12 @@ 
netvsc_attach(device_t dev) CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried", CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries"); -#ifdef HN_LRO_HIWAT - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_hiwat", - CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_hiwat_sysctl, - "I", "LRO high watermark"); -#endif + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", + CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", + "Max # of data bytes to be aggregated by LRO"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", + CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", + "Max # of ACKs to be aggregated by LRO"); SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, hn_trust_hcsum_sysctl, "I", @@ -1410,12 +1396,13 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) /* Obtain and record requested MTU */ ifp->if_mtu = ifr->ifr_mtu; + /* - * Make sure that LRO high watermark is still valid, - * after MTU change (the 2*MTU limit). + * Make sure that LRO aggregation length limit is still + * valid, after the MTU change. 
*/ - if (!HN_LRO_HIWAT_ISVALID(sc, sc->hn_lro_hiwat)) - hn_set_lro_hiwat(sc, HN_LRO_HIWAT_MTULIM(ifp)); + if (sc->hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) + sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_MIN(ifp); do { NV_LOCK(sc); @@ -1722,26 +1709,51 @@ hn_watchdog(struct ifnet *ifp) } #endif -#ifdef HN_LRO_HIWAT static int -hn_lro_hiwat_sysctl(SYSCTL_HANDLER_ARGS) +hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; - int hiwat, error; + unsigned int lenlim; + int error; - hiwat = sc->hn_lro_hiwat; - error = sysctl_handle_int(oidp, &hiwat, 0, req); + lenlim = sc->hn_lro.lro_length_lim; + error = sysctl_handle_int(oidp, &lenlim, 0, req); if (error || req->newptr == NULL) return error; - if (!HN_LRO_HIWAT_ISVALID(sc, hiwat)) + if (lenlim < HN_LRO_LENLIM_MIN(sc->hn_ifp) || + lenlim > TCP_LRO_LENGTH_MAX) return EINVAL; - if (sc->hn_lro_hiwat != hiwat) - hn_set_lro_hiwat(sc, hiwat); + sc->hn_lro.lro_length_lim = lenlim; + return 0; +} + +static int +hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + int ackcnt, error; + + /* + * lro_ackcnt_lim is append count limit, + * +1 to turn it into aggregation limit. + */ + ackcnt = sc->hn_lro.lro_ackcnt_lim + 1; + error = sysctl_handle_int(oidp, &ackcnt, 0, req); + if (error || req->newptr == NULL) + return error; + + if (ackcnt < 2 || ackcnt > (TCP_LRO_ACKCNT_MAX + 1)) + return EINVAL; + + /* + * Convert aggregation limit back to append + * count limit. + */ + sc->hn_lro.lro_ackcnt_lim = ackcnt - 1; return 0; } -#endif /* HN_LRO_HIWAT */ static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) -- cgit v1.2.3 From 58f5a606faa205f9a936b95017a3876a494135e2 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 06:55:05 +0000 Subject: hyperv/hn: Add option to allow sharing TX taskq between hn instances It is off by default. This eases further experimenting on this driver. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5272 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 42 +++++++++++++++++++++++---- 1 file changed, 37 insertions(+), 5 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index dea07797326c..3126955539d7 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -238,6 +238,11 @@ TUNABLE_INT("dev.hn.lro_entry_count", &hn_lro_entry_count); #endif #endif +static int hn_share_tx_taskq = 0; +TUNABLE_INT("hw.hn.share_tx_taskq", &hn_share_tx_taskq); + +static struct taskqueue *hn_tx_taskq; + /* * Forward declarations */ @@ -353,10 +358,14 @@ netvsc_attach(device_t dev) if (hn_trust_hostip) sc->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; - sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, - taskqueue_thread_enqueue, &sc->hn_tx_taskq); - taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", - device_get_nameunit(dev)); + if (hn_tx_taskq == NULL) { + sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, + taskqueue_thread_enqueue, &sc->hn_tx_taskq); + taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", + device_get_nameunit(dev)); + } else { + sc->hn_tx_taskq = hn_tx_taskq; + } TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc); TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc); @@ -602,7 +611,8 @@ netvsc_detach(device_t dev) taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task); taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task); - taskqueue_free(sc->hn_tx_taskq); + if (sc->hn_tx_taskq != hn_tx_taskq) + taskqueue_free(sc->hn_tx_taskq); ifmedia_removeall(&sc->hn_media); #if defined(INET) || defined(INET6) @@ -2039,6 +2049,28 @@ hn_txeof_taskfunc(void *xsc, int pending __unused) NV_UNLOCK(sc); } +static void +hn_tx_taskq_create(void *arg __unused) +{ + if 
(!hn_share_tx_taskq) + return; + + hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, + taskqueue_thread_enqueue, &hn_tx_taskq); + taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); +} +SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST, + hn_tx_taskq_create, NULL); + +static void +hn_tx_taskq_destroy(void *arg __unused) +{ + if (hn_tx_taskq != NULL) + taskqueue_free(hn_tx_taskq); +} +SYSUNINIT(hn_txtq_destroy, SI_SUB_DRIVERS, SI_ORDER_FIRST, + hn_tx_taskq_destroy, NULL); + static device_method_t netvsc_methods[] = { /* Device interface */ DEVMETHOD(device_probe, netvsc_probe), -- cgit v1.2.3 From 01610d88dafb0b09e0ba44784a25250faf28b38c Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:00:47 +0000 Subject: hyperv/hn: Always do transmission scheduling. This one gives the best performance so far. Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5273 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 3126955539d7..a8556019327c 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -465,6 +465,13 @@ netvsc_attach(device_t dev) hn_tx_chimney_size < sc->hn_tx_chimney_max) sc->hn_tx_chimney_size = hn_tx_chimney_size; + /* + * Always schedule transmission instead of trying + * to do direct transmission. This one gives the + * best performance so far. + */ + sc->hn_sched_tx = 1; + ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); -- cgit v1.2.3 From f9c55d1c83d4e86e90b27fecc0887e03f11375b9 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:06:44 +0000 Subject: hyperv/hn: Change global tunable prefix to hw.hn And use SYSCTL+CTLFLAG_RDTUN for them. 
Suggested by: adrian Reviewed by: adrian, Hongjiang Zhang Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5274 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 74 +++++++++------------------ 1 file changed, 24 insertions(+), 50 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index a8556019327c..0cd61d4a0d51 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -205,41 +205,57 @@ struct hn_txdesc { int hv_promisc_mode = 0; /* normal mode by default */ +SYSCTL_NODE(_hw, OID_AUTO, hn, CTLFLAG_RD, NULL, "Hyper-V network interface"); + /* Trust tcp segements verification on host side. */ static int hn_trust_hosttcp = 1; -TUNABLE_INT("dev.hn.trust_hosttcp", &hn_trust_hosttcp); +SYSCTL_INT(_hw_hn, OID_AUTO, trust_hosttcp, CTLFLAG_RDTUN, + &hn_trust_hosttcp, 0, + "Trust tcp segement verification on host side, " + "when csum info is missing (global setting)"); /* Trust udp datagrams verification on host side. */ static int hn_trust_hostudp = 1; -TUNABLE_INT("dev.hn.trust_hostudp", &hn_trust_hostudp); +SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostudp, CTLFLAG_RDTUN, + &hn_trust_hostudp, 0, + "Trust udp datagram verification on host side, " + "when csum info is missing (global setting)"); /* Trust ip packets verification on host side. 
*/ static int hn_trust_hostip = 1; -TUNABLE_INT("dev.hn.trust_hostip", &hn_trust_hostip); +SYSCTL_INT(_hw_hn, OID_AUTO, trust_hostip, CTLFLAG_RDTUN, + &hn_trust_hostip, 0, + "Trust ip packet verification on host side, " + "when csum info is missing (global setting)"); #if __FreeBSD_version >= 1100045 /* Limit TSO burst size */ static int hn_tso_maxlen = 0; -TUNABLE_INT("dev.hn.tso_maxlen", &hn_tso_maxlen); +SYSCTL_INT(_hw_hn, OID_AUTO, tso_maxlen, CTLFLAG_RDTUN, + &hn_tso_maxlen, 0, "TSO burst limit"); #endif /* Limit chimney send size */ static int hn_tx_chimney_size = 0; -TUNABLE_INT("dev.hn.tx_chimney_size", &hn_tx_chimney_size); +SYSCTL_INT(_hw_hn, OID_AUTO, tx_chimney_size, CTLFLAG_RDTUN, + &hn_tx_chimney_size, 0, "Chimney send packet size limit"); /* Limit the size of packet for direct transmission */ static int hn_direct_tx_size = HN_DIRECT_TX_SIZE_DEF; -TUNABLE_INT("dev.hn.direct_tx_size", &hn_direct_tx_size); +SYSCTL_INT(_hw_hn, OID_AUTO, direct_tx_size, CTLFLAG_RDTUN, + &hn_direct_tx_size, 0, "Size of the packet for direct transmission"); #if defined(INET) || defined(INET6) #if __FreeBSD_version >= 1100095 static int hn_lro_entry_count = HN_LROENT_CNT_DEF; -TUNABLE_INT("dev.hn.lro_entry_count", &hn_lro_entry_count); +SYSCTL_INT(_hw_hn, OID_AUTO, lro_entry_count, CTLFLAG_RDTUN, + &hn_lro_entry_count, 0, "LRO entry count"); #endif #endif static int hn_share_tx_taskq = 0; -TUNABLE_INT("hw.hn.share_tx_taskq", &hn_share_tx_taskq); +SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN, + &hn_share_tx_taskq, 0, "Enable shared TX taskqueue"); static struct taskqueue *hn_tx_taskq; @@ -541,48 +557,6 @@ netvsc_attach(device_t dev) "Always schedule transmission " "instead of doing direct transmission"); - if (unit == 0) { - struct sysctl_ctx_list *dc_ctx; - struct sysctl_oid_list *dc_child; - devclass_t dc; - - /* - * Add sysctl nodes for devclass - */ - dc = device_get_devclass(dev); - dc_ctx = devclass_get_sysctl_ctx(dc); - dc_child = 
SYSCTL_CHILDREN(devclass_get_sysctl_tree(dc)); - - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hosttcp", - CTLFLAG_RD, &hn_trust_hosttcp, 0, - "Trust tcp segement verification on host side, " - "when csum info is missing (global setting)"); - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hostudp", - CTLFLAG_RD, &hn_trust_hostudp, 0, - "Trust udp datagram verification on host side, " - "when csum info is missing (global setting)"); - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "trust_hostip", - CTLFLAG_RD, &hn_trust_hostip, 0, - "Trust ip packet verification on host side, " - "when csum info is missing (global setting)"); - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tx_chimney_size", - CTLFLAG_RD, &hn_tx_chimney_size, 0, - "Chimney send packet size limit"); -#if __FreeBSD_version >= 1100045 - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "tso_maxlen", - CTLFLAG_RD, &hn_tso_maxlen, 0, "TSO burst limit"); -#endif - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "direct_tx_size", - CTLFLAG_RD, &hn_direct_tx_size, 0, - "Size of the packet for direct transmission"); -#if defined(INET) || defined(INET6) -#if __FreeBSD_version >= 1100095 - SYSCTL_ADD_INT(dc_ctx, dc_child, OID_AUTO, "lro_entry_count", - CTLFLAG_RD, &hn_lro_entry_count, 0, "LRO entry count"); -#endif -#endif - } - return (0); failed: hn_destroy_tx_ring(sc); -- cgit v1.2.3 From 17ab6c4f172bdc35c5aa3cb3351b1785185bc285 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:16:31 +0000 Subject: hyperv/hn: Split RX ring data structure out of softc This paves the way for upcoming vRSS stuffs and eases more code cleanup. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5275 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 34 +-- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 340 ++++++++++++++++++-------- 2 files changed, 262 insertions(+), 112 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index f0b1c26199c7..31f1e704ecc1 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -993,6 +993,24 @@ typedef struct { struct hn_txdesc; SLIST_HEAD(hn_txdesc_list, hn_txdesc); +struct hn_rx_ring { + struct lro_ctrl hn_lro; + + /* Trust csum verification on host side */ + int hn_trust_hcsum; /* HN_TRUST_HCSUM_ */ + + u_long hn_csum_ip; + u_long hn_csum_tcp; + u_long hn_csum_udp; + u_long hn_csum_trusted; + u_long hn_lro_tried; + u_long hn_small_pkts; +} __aligned(CACHE_LINE_SIZE); + +#define HN_TRUST_HCSUM_IP 0x0001 +#define HN_TRUST_HCSUM_TCP 0x0002 +#define HN_TRUST_HCSUM_UDP 0x0004 + /* * Device-specific softc structure */ @@ -1029,17 +1047,9 @@ typedef struct hn_softc { struct task hn_start_task; struct task hn_txeof_task; - struct lro_ctrl hn_lro; - - /* Trust csum verification on host side */ - int hn_trust_hcsum; /* HN_TRUST_HCSUM_ */ + int hn_rx_ring_cnt; + struct hn_rx_ring *hn_rx_ring; - u_long hn_csum_ip; - u_long hn_csum_tcp; - u_long hn_csum_udp; - u_long hn_csum_trusted; - u_long hn_lro_tried; - u_long hn_small_pkts; u_long hn_no_txdescs; u_long hn_send_failed; u_long hn_txdma_failed; @@ -1047,10 +1057,6 @@ typedef struct hn_softc { u_long hn_tx_chimney; } hn_softc_t; -#define HN_TRUST_HCSUM_IP 0x0001 -#define HN_TRUST_HCSUM_TCP 0x0002 -#define HN_TRUST_HCSUM_UDP 0x0004 - /* * Externs */ diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 0cd61d4a0d51..dcf5b1a3949b 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ 
b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -275,12 +275,16 @@ static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS); static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); static int hn_check_iplen(const struct mbuf *, int); static int hn_create_tx_ring(struct hn_softc *sc); static void hn_destroy_tx_ring(struct hn_softc *sc); static void hn_start_taskfunc(void *xsc, int pending); static void hn_txeof_taskfunc(void *xsc, int pending); static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **); +static void hn_create_rx_data(struct hn_softc *sc); +static void hn_destroy_rx_data(struct hn_softc *sc); static int hn_ifmedia_upd(struct ifnet *ifp __unused) @@ -351,11 +355,6 @@ netvsc_attach(device_t dev) int error; #if __FreeBSD_version >= 1100045 int tso_maxlen; -#endif -#if defined(INET) || defined(INET6) -#if __FreeBSD_version >= 1100095 - int lroent_cnt; -#endif #endif sc = device_get_softc(dev); @@ -367,12 +366,6 @@ netvsc_attach(device_t dev) sc->hn_unit = unit; sc->hn_dev = dev; sc->hn_direct_tx_size = hn_direct_tx_size; - if (hn_trust_hosttcp) - sc->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; - if (hn_trust_hostudp) - sc->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; - if (hn_trust_hostip) - sc->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; if (hn_tx_taskq == NULL) { sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, @@ -396,6 +389,8 @@ netvsc_attach(device_t dev) ifp = sc->hn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = sc; + hn_create_rx_data(sc); + if_initname(ifp, device_get_name(dev), device_get_unit(dev)); ifp->if_dunit = unit; ifp->if_dname = NETVSC_DEVNAME; @@ -441,22 +436,6 @@ netvsc_attach(device_t dev) sc->hn_carrier = 1; } -#if defined(INET) || defined(INET6) -#if __FreeBSD_version >= 1100095 - lroent_cnt = 
hn_lro_entry_count; - if (lroent_cnt < TCP_LRO_ENTRIES) - lroent_cnt = TCP_LRO_ENTRIES; - tcp_lro_init_args(&sc->hn_lro, ifp, lroent_cnt, 0); - device_printf(dev, "LRO: entry count %d\n", lroent_cnt); -#else - tcp_lro_init(&sc->hn_lro); - /* Driver private LRO settings */ - sc->hn_lro.ifp = ifp; -#endif - sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; - sc->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; -#endif /* INET || INET6 */ - #if __FreeBSD_version >= 1100045 tso_maxlen = hn_tso_maxlen; if (tso_maxlen <= 0 || tso_maxlen > IP_MAXPACKET) @@ -491,44 +470,6 @@ netvsc_attach(device_t dev) ctx = device_get_sysctl_ctx(dev); child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); - SYSCTL_ADD_U64(ctx, child, OID_AUTO, "lro_queued", - CTLFLAG_RW, &sc->hn_lro.lro_queued, 0, "LRO queued"); - SYSCTL_ADD_U64(ctx, child, OID_AUTO, "lro_flushed", - CTLFLAG_RW, &sc->hn_lro.lro_flushed, 0, "LRO flushed"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "lro_tried", - CTLFLAG_RW, &sc->hn_lro_tried, "# of LRO tries"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", - CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", - "Max # of data bytes to be aggregated by LRO"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", - CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", - "Max # of ACKs to be aggregated by LRO"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", - CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, - hn_trust_hcsum_sysctl, "I", - "Trust tcp segement verification on host side, " - "when csum info is missing"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", - CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP, - hn_trust_hcsum_sysctl, "I", - "Trust udp datagram verification on host side, " - "when csum info is missing"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", - CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP, - hn_trust_hcsum_sysctl, "I", - "Trust ip packet verification on host side, " - "when csum info is 
missing"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_ip", - CTLFLAG_RW, &sc->hn_csum_ip, "RXCSUM IP"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_tcp", - CTLFLAG_RW, &sc->hn_csum_tcp, "RXCSUM TCP"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_udp", - CTLFLAG_RW, &sc->hn_csum_udp, "RXCSUM UDP"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "csum_trusted", - CTLFLAG_RW, &sc->hn_csum_trusted, - "# of packets that we trust host's csum verification"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "small_pkts", - CTLFLAG_RW, &sc->hn_small_pkts, "# of small packets received"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs", CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs"); SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed", @@ -596,9 +537,7 @@ netvsc_detach(device_t dev) taskqueue_free(sc->hn_tx_taskq); ifmedia_removeall(&sc->hn_media); -#if defined(INET) || defined(INET6) - tcp_lro_free(&sc->hn_lro); -#endif + hn_destroy_rx_data(sc); hn_destroy_tx_ring(sc); return (0); @@ -746,7 +685,8 @@ netvsc_channel_rollup(struct hv_device *device_ctx) { struct hn_softc *sc = device_get_softc(device_ctx->device); #if defined(INET) || defined(INET6) - struct lro_ctrl *lro = &sc->hn_lro; + struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ + struct lro_ctrl *lro = &rxr->hn_lro; struct lro_entry *queued; while ((queued = SLIST_FIRST(&lro->lro_active)) != NULL) { @@ -1162,10 +1102,10 @@ int netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, rndis_tcp_ip_csum_info *csum_info) { - hn_softc_t *sc = (hn_softc_t *)device_get_softc(device_ctx->device); + struct hn_softc *sc = device_get_softc(device_ctx->device); + struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ struct mbuf *m_new; struct ifnet *ifp; - device_t dev = device_ctx->device; int size, do_lro = 0, do_csum = 1; if (sc == NULL) { @@ -1190,7 +1130,7 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, memcpy(mtod(m_new, void *), packet->data, 
packet->tot_data_buf_len); m_new->m_pkthdr.len = m_new->m_len = packet->tot_data_buf_len; - sc->hn_small_pkts++; + rxr->hn_small_pkts++; } else { /* * Get an mbuf with a cluster. For packets 2K or less, @@ -1206,7 +1146,7 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, m_new = m_getjcl(M_NOWAIT, MT_DATA, M_PKTHDR, size); if (m_new == NULL) { - device_printf(dev, "alloc mbuf failed.\n"); + if_printf(ifp, "alloc mbuf failed.\n"); return (0); } @@ -1223,7 +1163,7 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, if (csum_info->receive.ip_csum_succeeded && do_csum) { m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); - sc->hn_csum_ip++; + rxr->hn_csum_ip++; } /* TCP/UDP csum offload */ @@ -1233,9 +1173,9 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, (CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; if (csum_info->receive.tcp_csum_succeeded) - sc->hn_csum_tcp++; + rxr->hn_csum_tcp++; else - sc->hn_csum_udp++; + rxr->hn_csum_udp++; } if (csum_info->receive.ip_csum_succeeded && @@ -1267,8 +1207,9 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, pr = hn_check_iplen(m_new, hoff); if (pr == IPPROTO_TCP) { if (do_csum && - (sc->hn_trust_hcsum & HN_TRUST_HCSUM_TCP)) { - sc->hn_csum_trusted++; + (rxr->hn_trust_hcsum & + HN_TRUST_HCSUM_TCP)) { + rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); @@ -1278,16 +1219,17 @@ netvsc_recv(struct hv_device *device_ctx, netvsc_packet *packet, do_lro = 1; } else if (pr == IPPROTO_UDP) { if (do_csum && - (sc->hn_trust_hcsum & HN_TRUST_HCSUM_UDP)) { - sc->hn_csum_trusted++; + (rxr->hn_trust_hcsum & + HN_TRUST_HCSUM_UDP)) { + rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID | CSUM_DATA_VALID | CSUM_PSEUDO_HDR); m_new->m_pkthdr.csum_data = 0xffff; } } else if (pr != IPPROTO_DONE && do_csum && - (sc->hn_trust_hcsum & 
HN_TRUST_HCSUM_IP)) { - sc->hn_csum_trusted++; + (rxr->hn_trust_hcsum & HN_TRUST_HCSUM_IP)) { + rxr->hn_csum_trusted++; m_new->m_pkthdr.csum_flags |= (CSUM_IP_CHECKED | CSUM_IP_VALID); } @@ -1309,10 +1251,10 @@ skip: if ((ifp->if_capenable & IFCAP_LRO) && do_lro) { #if defined(INET) || defined(INET6) - struct lro_ctrl *lro = &sc->hn_lro; + struct lro_ctrl *lro = &rxr->hn_lro; if (lro->lro_cnt) { - sc->hn_lro_tried++; + rxr->hn_lro_tried++; if (tcp_lro_rx(lro, m_new, 0) == 0) { /* DONE! */ return 0; @@ -1392,8 +1334,17 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) * Make sure that LRO aggregation length limit is still * valid, after the MTU change. */ - if (sc->hn_lro.lro_length_lim < HN_LRO_LENLIM_MIN(ifp)) - sc->hn_lro.lro_length_lim = HN_LRO_LENLIM_MIN(ifp); + NV_LOCK(sc); + if (sc->hn_rx_ring[0].hn_lro.lro_length_lim < + HN_LRO_LENLIM_MIN(ifp)) { + int i; + + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + sc->hn_rx_ring[i].hn_lro.lro_length_lim = + HN_LRO_LENLIM_MIN(ifp); + } + } + NV_UNLOCK(sc); do { NV_LOCK(sc); @@ -1705,9 +1656,9 @@ hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; unsigned int lenlim; - int error; + int error, i; - lenlim = sc->hn_lro.lro_length_lim; + lenlim = sc->hn_rx_ring[0].hn_lro.lro_length_lim; error = sysctl_handle_int(oidp, &lenlim, 0, req); if (error || req->newptr == NULL) return error; @@ -1716,7 +1667,10 @@ hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS) lenlim > TCP_LRO_LENGTH_MAX) return EINVAL; - sc->hn_lro.lro_length_lim = lenlim; + NV_LOCK(sc); + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) + sc->hn_rx_ring[i].hn_lro.lro_length_lim = lenlim; + NV_UNLOCK(sc); return 0; } @@ -1724,13 +1678,13 @@ static int hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; - int ackcnt, error; + int ackcnt, error, i; /* * lro_ackcnt_lim is append count limit, * +1 to turn it into aggregation limit. 
*/ - ackcnt = sc->hn_lro.lro_ackcnt_lim + 1; + ackcnt = sc->hn_rx_ring[0].hn_lro.lro_ackcnt_lim + 1; error = sysctl_handle_int(oidp, &ackcnt, 0, req); if (error || req->newptr == NULL) return error; @@ -1742,7 +1696,11 @@ hn_lro_ackcnt_sysctl(SYSCTL_HANDLER_ARGS) * Convert aggregation limit back to append * count limit. */ - sc->hn_lro.lro_ackcnt_lim = ackcnt - 1; + --ackcnt; + NV_LOCK(sc); + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) + sc->hn_rx_ring[i].hn_lro.lro_ackcnt_lim = ackcnt; + NV_UNLOCK(sc); return 0; } @@ -1751,10 +1709,10 @@ hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) { struct hn_softc *sc = arg1; int hcsum = arg2; - int on, error; + int on, error, i; on = 0; - if (sc->hn_trust_hcsum & hcsum) + if (sc->hn_rx_ring[0].hn_trust_hcsum & hcsum) on = 1; error = sysctl_handle_int(oidp, &on, 0, req); @@ -1762,10 +1720,14 @@ hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS) return error; NV_LOCK(sc); - if (on) - sc->hn_trust_hcsum |= hcsum; - else - sc->hn_trust_hcsum &= ~hcsum; + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; + + if (on) + rxr->hn_trust_hcsum |= hcsum; + else + rxr->hn_trust_hcsum &= ~hcsum; + } NV_UNLOCK(sc); return 0; } @@ -1789,6 +1751,58 @@ hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) return 0; } +static int +hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + int ofs = arg2, i, error; + struct hn_rx_ring *rxr; + u_long stat; + + stat = 0; + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + rxr = &sc->hn_rx_ring[i]; + stat += *((u_long *)((uint8_t *)rxr + ofs)); + } + + error = sysctl_handle_long(oidp, &stat, 0, req); + if (error || req->newptr == NULL) + return error; + + /* Zero out this stat. 
*/ + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + rxr = &sc->hn_rx_ring[i]; + *((u_long *)((uint8_t *)rxr + ofs)) = 0; + } + return 0; +} + +static int +hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + int ofs = arg2, i, error; + struct hn_rx_ring *rxr; + uint64_t stat; + + stat = 0; + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + rxr = &sc->hn_rx_ring[i]; + stat += *((uint64_t *)((uint8_t *)rxr + ofs)); + } + + error = sysctl_handle_64(oidp, &stat, 0, req); + if (error || req->newptr == NULL) + return error; + + /* Zero out this stat. */ + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + rxr = &sc->hn_rx_ring[i]; + *((uint64_t *)((uint8_t *)rxr + ofs)) = 0; + } + return 0; +} + static int hn_check_iplen(const struct mbuf *m, int hoff) { @@ -1876,6 +1890,136 @@ hn_dma_map_paddr(void *arg, bus_dma_segment_t *segs, int nseg, int error) *paddr = segs->ds_addr; } +static void +hn_create_rx_data(struct hn_softc *sc) +{ + struct sysctl_oid_list *child; + struct sysctl_ctx_list *ctx; + device_t dev = sc->hn_dev; +#if defined(INET) || defined(INET6) +#if __FreeBSD_version >= 1100095 + int lroent_cnt; +#endif +#endif + int i; + + sc->hn_rx_ring_cnt = 1; /* TODO: vRSS */ + sc->hn_rx_ring = malloc(sizeof(struct hn_rx_ring) * sc->hn_rx_ring_cnt, + M_NETVSC, M_WAITOK | M_ZERO); + +#if defined(INET) || defined(INET6) +#if __FreeBSD_version >= 1100095 + lroent_cnt = hn_lro_entry_count; + if (lroent_cnt < TCP_LRO_ENTRIES) + lroent_cnt = TCP_LRO_ENTRIES; + device_printf(dev, "LRO: entry count %d\n", lroent_cnt); +#endif +#endif /* INET || INET6 */ + + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) { + struct hn_rx_ring *rxr = &sc->hn_rx_ring[i]; + + if (hn_trust_hosttcp) + rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_TCP; + if (hn_trust_hostudp) + rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_UDP; + if (hn_trust_hostip) + rxr->hn_trust_hcsum |= HN_TRUST_HCSUM_IP; + + /* + * Initialize LRO. 
+ */ +#if defined(INET) || defined(INET6) +#if __FreeBSD_version >= 1100095 + tcp_lro_init_args(&rxr->hn_lro, sc->hn_ifp, lroent_cnt, 0); +#else + tcp_lro_init(&rxr->hn_lro); + rxr->hn_lro.ifp = sc->hn_ifp; +#endif + rxr->hn_lro.lro_length_lim = HN_LRO_LENLIM_DEF; + rxr->hn_lro.lro_ackcnt_lim = HN_LRO_ACKCNT_DEF; +#endif /* INET || INET6 */ + } + + ctx = device_get_sysctl_ctx(dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); + + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_queued", + CTLTYPE_U64 | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_lro.lro_queued), + hn_rx_stat_u64_sysctl, "LU", "LRO queued"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_flushed", + CTLTYPE_U64 | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_lro.lro_flushed), + hn_rx_stat_u64_sysctl, "LU", "LRO flushed"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_tried", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_lro_tried), + hn_rx_stat_ulong_sysctl, "LU", "# of LRO tries"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_length_lim", + CTLTYPE_UINT | CTLFLAG_RW, sc, 0, hn_lro_lenlim_sysctl, "IU", + "Max # of data bytes to be aggregated by LRO"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "lro_ackcnt_lim", + CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_lro_ackcnt_sysctl, "I", + "Max # of ACKs to be aggregated by LRO"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hosttcp", + CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_TCP, + hn_trust_hcsum_sysctl, "I", + "Trust tcp segement verification on host side, " + "when csum info is missing"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostudp", + CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_UDP, + hn_trust_hcsum_sysctl, "I", + "Trust udp datagram verification on host side, " + "when csum info is missing"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "trust_hostip", + CTLTYPE_INT | CTLFLAG_RW, sc, HN_TRUST_HCSUM_IP, + hn_trust_hcsum_sysctl, "I", + "Trust ip packet verification on host side, " + "when csum info is missing"); + 
SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_ip", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_csum_ip), + hn_rx_stat_ulong_sysctl, "LU", "RXCSUM IP"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_tcp", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_csum_tcp), + hn_rx_stat_ulong_sysctl, "LU", "RXCSUM TCP"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_udp", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_csum_udp), + hn_rx_stat_ulong_sysctl, "LU", "RXCSUM UDP"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "csum_trusted", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_csum_trusted), + hn_rx_stat_ulong_sysctl, "LU", + "# of packets that we trust host's csum verification"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "small_pkts", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_rx_ring, hn_small_pkts), + hn_rx_stat_ulong_sysctl, "LU", "# of small packets received"); +} + +static void +hn_destroy_rx_data(struct hn_softc *sc) +{ +#if defined(INET) || defined(INET6) + int i; +#endif + + if (sc->hn_rx_ring_cnt == 0) + return; + +#if defined(INET) || defined(INET6) + for (i = 0; i < sc->hn_rx_ring_cnt; ++i) + tcp_lro_free(&sc->hn_rx_ring[i].hn_lro); +#endif + free(sc->hn_rx_ring, M_NETVSC); + sc->hn_rx_ring = NULL; + + sc->hn_rx_ring_cnt = 0; +} + static int hn_create_tx_ring(struct hn_softc *sc) { -- cgit v1.2.3 From 4d8e8cb113bcb3d698b8fb2b7ccfc50879a34149 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:23:05 +0000 Subject: hyperv/hn: Use taskqueue_enqueue() This also eases experiment on the non-fast taskqueue. 
Reviewed by: adrian, Jun Su Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5276 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index dcf5b1a3949b..b395aecd49d9 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -1549,7 +1549,7 @@ hn_start(struct ifnet *ifp) return; } do_sched: - taskqueue_enqueue_fast(sc->hn_tx_taskq, &sc->hn_start_task); + taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task); } static void @@ -1566,10 +1566,8 @@ hn_start_txeof(struct ifnet *ifp) atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); sched = hn_start_locked(ifp, sc->hn_direct_tx_size); NV_UNLOCK(sc); - if (sched) { - taskqueue_enqueue_fast(sc->hn_tx_taskq, - &sc->hn_start_task); - } + if (sched) + taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task); } else { do_sched: /* @@ -1579,7 +1577,7 @@ do_sched: * races. */ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); - taskqueue_enqueue_fast(sc->hn_tx_taskq, &sc->hn_txeof_task); + taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_txeof_task); } } -- cgit v1.2.3 From b8f2d59daf174932e54b88507077ba158e32df83 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:28:45 +0000 Subject: hyperv/hn: Use non-fast taskqueue for transmission Performance stays same; so no need to use fast taskqueue here. 
Suggested by: royger Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5282 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index b395aecd49d9..36435a53f368 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -368,7 +368,7 @@ netvsc_attach(device_t dev) sc->hn_direct_tx_size = hn_direct_tx_size; if (hn_tx_taskq == NULL) { - sc->hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, + sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_tx_taskq); taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", device_get_nameunit(dev)); @@ -2178,7 +2178,7 @@ hn_tx_taskq_create(void *arg __unused) if (!hn_share_tx_taskq) return; - hn_tx_taskq = taskqueue_create_fast("hn_tx", M_WAITOK, + hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &hn_tx_taskq); taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); } -- cgit v1.2.3 From dbfb4eba54247e4055cb535b53a8497ff5d0f932 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:37:59 +0000 Subject: hyperv/hn: Split TX ring data structure out of softc This paves the way for upcoming vRSS stuffs and eases more code cleanup. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5283 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 61 +-- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 551 +++++++++++++++++--------- 2 files changed, 404 insertions(+), 208 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index 31f1e704ecc1..fa71af236f17 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -1011,6 +1011,38 @@ struct hn_rx_ring { #define HN_TRUST_HCSUM_TCP 0x0002 #define HN_TRUST_HCSUM_UDP 0x0004 +struct hn_tx_ring { + struct mtx hn_txlist_spin; + struct hn_txdesc_list hn_txlist; + int hn_txdesc_cnt; + int hn_txdesc_avail; + int hn_txeof; + + int hn_sched_tx; + struct taskqueue *hn_tx_taskq; + struct task hn_start_task; + struct task hn_txeof_task; + + struct mtx hn_tx_lock; + struct hn_softc *hn_sc; + + int hn_direct_tx_size; + int hn_tx_chimney_size; + bus_dma_tag_t hn_tx_data_dtag; + uint64_t hn_csum_assist; + + u_long hn_no_txdescs; + u_long hn_send_failed; + u_long hn_txdma_failed; + u_long hn_tx_collapsed; + u_long hn_tx_chimney; + + /* Rarely used stuffs */ + struct hn_txdesc *hn_txdesc; + bus_dma_tag_t hn_tx_rndis_dtag; + struct sysctl_oid *hn_tx_sysctl_tree; +} __aligned(CACHE_LINE_SIZE); + /* * Device-specific softc structure */ @@ -1028,33 +1060,14 @@ typedef struct hn_softc { struct hv_device *hn_dev_obj; netvsc_dev *net_dev; - struct hn_txdesc *hn_txdesc; - bus_dma_tag_t hn_tx_data_dtag; - bus_dma_tag_t hn_tx_rndis_dtag; - int hn_tx_chimney_size; - int hn_tx_chimney_max; - uint64_t hn_csum_assist; - - struct mtx hn_txlist_spin; - struct hn_txdesc_list hn_txlist; - int hn_txdesc_cnt; - int hn_txdesc_avail; - int hn_txeof; - - int hn_sched_tx; - int hn_direct_tx_size; - struct taskqueue *hn_tx_taskq; - struct task hn_start_task; - struct task hn_txeof_task; - int hn_rx_ring_cnt; struct hn_rx_ring 
*hn_rx_ring; - u_long hn_no_txdescs; - u_long hn_send_failed; - u_long hn_txdma_failed; - u_long hn_tx_collapsed; - u_long hn_tx_chimney; + int hn_tx_ring_cnt; + struct hn_tx_ring *hn_tx_ring; + int hn_tx_chimney_max; + struct taskqueue *hn_tx_taskq; + struct sysctl_oid *hn_tx_sysctl_tree; } hn_softc_t; /* diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 36435a53f368..23e43cedae0c 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -153,7 +153,7 @@ __FBSDID("$FreeBSD$"); struct hn_txdesc { SLIST_ENTRY(hn_txdesc) link; struct mbuf *m; - struct hn_softc *sc; + struct hn_tx_ring *txr; int refs; uint32_t flags; /* HN_TXD_FLAG_ */ netvsc_packet netvsc_pkt; /* XXX to be removed */ @@ -193,7 +193,6 @@ struct hn_txdesc { #define NV_LOCK_INIT(_sc, _name) \ mtx_init(&(_sc)->hn_lock, _name, MTX_NETWORK_LOCK, MTX_DEF) #define NV_LOCK(_sc) mtx_lock(&(_sc)->hn_lock) -#define NV_TRYLOCK(_sc) mtx_trylock(&(_sc)->hn_lock) #define NV_LOCK_ASSERT(_sc) mtx_assert(&(_sc)->hn_lock, MA_OWNED) #define NV_UNLOCK(_sc) mtx_unlock(&(_sc)->hn_lock) #define NV_LOCK_DESTROY(_sc) mtx_destroy(&(_sc)->hn_lock) @@ -266,9 +265,9 @@ static void hn_stop(hn_softc_t *sc); static void hn_ifinit_locked(hn_softc_t *sc); static void hn_ifinit(void *xsc); static int hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data); -static int hn_start_locked(struct ifnet *ifp, int len); +static int hn_start_locked(struct hn_tx_ring *txr, int len); static void hn_start(struct ifnet *ifp); -static void hn_start_txeof(struct ifnet *ifp); +static void hn_start_txeof(struct hn_tx_ring *); static int hn_ifmedia_upd(struct ifnet *ifp); static void hn_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr); static int hn_lro_lenlim_sysctl(SYSCTL_HANDLER_ARGS); @@ -277,14 +276,20 @@ static int hn_trust_hcsum_sysctl(SYSCTL_HANDLER_ARGS); static int hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS); static int 
hn_rx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); static int hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS); +static int hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS); static int hn_check_iplen(const struct mbuf *, int); -static int hn_create_tx_ring(struct hn_softc *sc); -static void hn_destroy_tx_ring(struct hn_softc *sc); +static int hn_create_tx_ring(struct hn_softc *, int); +static void hn_destroy_tx_ring(struct hn_tx_ring *); +static int hn_create_tx_data(struct hn_softc *); +static void hn_destroy_tx_data(struct hn_softc *); static void hn_start_taskfunc(void *xsc, int pending); static void hn_txeof_taskfunc(void *xsc, int pending); -static int hn_encap(struct hn_softc *, struct hn_txdesc *, struct mbuf **); +static void hn_stop_tx_tasks(struct hn_softc *); +static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **); static void hn_create_rx_data(struct hn_softc *sc); static void hn_destroy_rx_data(struct hn_softc *sc); +static void hn_set_tx_chimney_size(struct hn_softc *, int); static int hn_ifmedia_upd(struct ifnet *ifp __unused) @@ -350,8 +355,6 @@ netvsc_attach(device_t dev) hn_softc_t *sc; int unit = device_get_unit(dev); struct ifnet *ifp = NULL; - struct sysctl_oid_list *child; - struct sysctl_ctx_list *ctx; int error; #if __FreeBSD_version >= 1100045 int tso_maxlen; @@ -365,7 +368,6 @@ netvsc_attach(device_t dev) bzero(sc, sizeof(hn_softc_t)); sc->hn_unit = unit; sc->hn_dev = dev; - sc->hn_direct_tx_size = hn_direct_tx_size; if (hn_tx_taskq == NULL) { sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, @@ -375,13 +377,6 @@ netvsc_attach(device_t dev) } else { sc->hn_tx_taskq = hn_tx_taskq; } - TASK_INIT(&sc->hn_start_task, 0, hn_start_taskfunc, sc); - TASK_INIT(&sc->hn_txeof_task, 0, hn_txeof_taskfunc, sc); - - error = hn_create_tx_ring(sc); - if (error) - goto failed; - NV_LOCK_INIT(sc, "NetVSCLock"); sc->hn_dev_obj = device_ctx; @@ -389,6 +384,10 @@ netvsc_attach(device_t dev) ifp 
= sc->hn_ifp = if_alloc(IFT_ETHER); ifp->if_softc = sc; + error = hn_create_tx_data(sc); + if (error) + goto failed; + hn_create_rx_data(sc); if_initname(ifp, device_get_name(dev), device_get_unit(dev)); @@ -421,12 +420,7 @@ netvsc_attach(device_t dev) ifp->if_capenable |= IFCAP_VLAN_HWTAGGING | IFCAP_VLAN_MTU | IFCAP_HWCSUM | IFCAP_TSO | IFCAP_LRO; - - if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1) - sc->hn_csum_assist = HN_CSUM_ASSIST; - else - sc->hn_csum_assist = HN_CSUM_ASSIST_WIN8; - ifp->if_hwassist = sc->hn_csum_assist | CSUM_TSO; + ifp->if_hwassist = sc->hn_tx_ring[0].hn_csum_assist | CSUM_TSO; error = hv_rf_on_device_add(device_ctx, &device_info); if (error) @@ -455,52 +449,14 @@ netvsc_attach(device_t dev) #endif sc->hn_tx_chimney_max = sc->net_dev->send_section_size; - sc->hn_tx_chimney_size = sc->hn_tx_chimney_max; + hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); if (hn_tx_chimney_size > 0 && hn_tx_chimney_size < sc->hn_tx_chimney_max) - sc->hn_tx_chimney_size = hn_tx_chimney_size; - - /* - * Always schedule transmission instead of trying - * to do direct transmission. This one gives the - * best performance so far. 
- */ - sc->hn_sched_tx = 1; - - ctx = device_get_sysctl_ctx(dev); - child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); - - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "no_txdescs", - CTLFLAG_RW, &sc->hn_no_txdescs, "# of times short of TX descs"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "send_failed", - CTLFLAG_RW, &sc->hn_send_failed, "# of hyper-v sending failure"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "txdma_failed", - CTLFLAG_RW, &sc->hn_txdma_failed, "# of TX DMA failure"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_collapsed", - CTLFLAG_RW, &sc->hn_tx_collapsed, "# of TX mbuf collapsed"); - SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "tx_chimney", - CTLFLAG_RW, &sc->hn_tx_chimney, "# of chimney send"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt", - CTLFLAG_RD, &sc->hn_txdesc_cnt, 0, "# of total TX descs"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail", - CTLFLAG_RD, &sc->hn_txdesc_avail, 0, "# of available TX descs"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max", - CTLFLAG_RD, &sc->hn_tx_chimney_max, 0, - "Chimney send packet size upper boundary"); - SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size", - CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl, - "I", "Chimney send packet size limit"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "direct_tx_size", - CTLFLAG_RW, &sc->hn_direct_tx_size, 0, - "Size of the packet for direct transmission"); - SYSCTL_ADD_INT(ctx, child, OID_AUTO, "sched_tx", - CTLFLAG_RW, &sc->hn_sched_tx, 0, - "Always schedule transmission " - "instead of doing direct transmission"); + hn_set_tx_chimney_size(sc, hn_tx_chimney_size); return (0); failed: - hn_destroy_tx_ring(sc); + hn_destroy_tx_data(sc); if (ifp != NULL) if_free(ifp); return (error); @@ -531,14 +487,14 @@ netvsc_detach(device_t dev) hv_rf_on_device_remove(hv_device, HV_RF_NV_DESTROY_CHANNEL); - taskqueue_drain(sc->hn_tx_taskq, &sc->hn_start_task); - taskqueue_drain(sc->hn_tx_taskq, &sc->hn_txeof_task); - if (sc->hn_tx_taskq != 
hn_tx_taskq) - taskqueue_free(sc->hn_tx_taskq); + hn_stop_tx_tasks(sc); ifmedia_removeall(&sc->hn_media); hn_destroy_rx_data(sc); - hn_destroy_tx_ring(sc); + hn_destroy_tx_data(sc); + + if (sc->hn_tx_taskq != hn_tx_taskq) + taskqueue_free(sc->hn_tx_taskq); return (0); } @@ -553,13 +509,13 @@ netvsc_shutdown(device_t dev) } static __inline int -hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd, +hn_txdesc_dmamap_load(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head, bus_dma_segment_t *segs, int *nsegs) { struct mbuf *m = *m_head; int error; - error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, txd->data_dmap, + error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); if (error == EFBIG) { struct mbuf *m_new; @@ -569,13 +525,13 @@ hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd, return ENOBUFS; else *m_head = m = m_new; - sc->hn_tx_collapsed++; + txr->hn_tx_collapsed++; - error = bus_dmamap_load_mbuf_sg(sc->hn_tx_data_dtag, + error = bus_dmamap_load_mbuf_sg(txr->hn_tx_data_dtag, txd->data_dmap, m, segs, nsegs, BUS_DMA_NOWAIT); } if (!error) { - bus_dmamap_sync(sc->hn_tx_data_dtag, txd->data_dmap, + bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_PREWRITE); txd->flags |= HN_TXD_FLAG_DMAMAP; } @@ -583,20 +539,20 @@ hn_txdesc_dmamap_load(struct hn_softc *sc, struct hn_txdesc *txd, } static __inline void -hn_txdesc_dmamap_unload(struct hn_softc *sc, struct hn_txdesc *txd) +hn_txdesc_dmamap_unload(struct hn_tx_ring *txr, struct hn_txdesc *txd) { if (txd->flags & HN_TXD_FLAG_DMAMAP) { - bus_dmamap_sync(sc->hn_tx_data_dtag, + bus_dmamap_sync(txr->hn_tx_data_dtag, txd->data_dmap, BUS_DMASYNC_POSTWRITE); - bus_dmamap_unload(sc->hn_tx_data_dtag, + bus_dmamap_unload(txr->hn_tx_data_dtag, txd->data_dmap); txd->flags &= ~HN_TXD_FLAG_DMAMAP; } } static __inline int -hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd) +hn_txdesc_put(struct hn_tx_ring *txr, 
struct hn_txdesc *txd) { KASSERT((txd->flags & HN_TXD_FLAG_ONLIST) == 0, @@ -606,7 +562,7 @@ hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd) if (atomic_fetchadd_int(&txd->refs, -1) != 1) return 0; - hn_txdesc_dmamap_unload(sc, txd); + hn_txdesc_dmamap_unload(txr, txd); if (txd->m != NULL) { m_freem(txd->m); txd->m = NULL; @@ -614,31 +570,31 @@ hn_txdesc_put(struct hn_softc *sc, struct hn_txdesc *txd) txd->flags |= HN_TXD_FLAG_ONLIST; - mtx_lock_spin(&sc->hn_txlist_spin); - KASSERT(sc->hn_txdesc_avail >= 0 && - sc->hn_txdesc_avail < sc->hn_txdesc_cnt, - ("txdesc_put: invalid txd avail %d", sc->hn_txdesc_avail)); - sc->hn_txdesc_avail++; - SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link); - mtx_unlock_spin(&sc->hn_txlist_spin); + mtx_lock_spin(&txr->hn_txlist_spin); + KASSERT(txr->hn_txdesc_avail >= 0 && + txr->hn_txdesc_avail < txr->hn_txdesc_cnt, + ("txdesc_put: invalid txd avail %d", txr->hn_txdesc_avail)); + txr->hn_txdesc_avail++; + SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); + mtx_unlock_spin(&txr->hn_txlist_spin); return 1; } static __inline struct hn_txdesc * -hn_txdesc_get(struct hn_softc *sc) +hn_txdesc_get(struct hn_tx_ring *txr) { struct hn_txdesc *txd; - mtx_lock_spin(&sc->hn_txlist_spin); - txd = SLIST_FIRST(&sc->hn_txlist); + mtx_lock_spin(&txr->hn_txlist_spin); + txd = SLIST_FIRST(&txr->hn_txlist); if (txd != NULL) { - KASSERT(sc->hn_txdesc_avail > 0, - ("txdesc_get: invalid txd avail %d", sc->hn_txdesc_avail)); - sc->hn_txdesc_avail--; - SLIST_REMOVE_HEAD(&sc->hn_txlist, link); + KASSERT(txr->hn_txdesc_avail > 0, + ("txdesc_get: invalid txd avail %d", txr->hn_txdesc_avail)); + txr->hn_txdesc_avail--; + SLIST_REMOVE_HEAD(&txr->hn_txlist, link); } - mtx_unlock_spin(&sc->hn_txlist_spin); + mtx_unlock_spin(&txr->hn_txlist_spin); if (txd != NULL) { KASSERT(txd->m == NULL && txd->refs == 0 && @@ -670,20 +626,21 @@ netvsc_xmit_completion(void *context) { netvsc_packet *packet = context; struct hn_txdesc *txd; - struct hn_softc *sc; + struct 
hn_tx_ring *txr; txd = (struct hn_txdesc *)(uintptr_t) packet->compl.send.send_completion_tid; - sc = txd->sc; - sc->hn_txeof = 1; - hn_txdesc_put(sc, txd); + txr = txd->txr; + txr->hn_txeof = 1; + hn_txdesc_put(txr, txd); } void netvsc_channel_rollup(struct hv_device *device_ctx) { struct hn_softc *sc = device_get_softc(device_ctx->device); + struct hn_tx_ring *txr = &sc->hn_tx_ring[0]; /* TODO: vRSS */ #if defined(INET) || defined(INET6) struct hn_rx_ring *rxr = &sc->hn_rx_ring[0]; /* TODO: vRSS */ struct lro_ctrl *lro = &rxr->hn_lro; @@ -695,11 +652,11 @@ netvsc_channel_rollup(struct hv_device *device_ctx) } #endif - if (!sc->hn_txeof) + if (!txr->hn_txeof) return; - sc->hn_txeof = 0; - hn_start_txeof(sc->hn_ifp); + txr->hn_txeof = 0; + hn_start_txeof(txr); } /* @@ -707,7 +664,7 @@ netvsc_channel_rollup(struct hv_device *device_ctx) * If this function fails, then both txd and m_head0 will be freed. */ static int -hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0) +hn_encap(struct hn_tx_ring *txr, struct hn_txdesc *txd, struct mbuf **m_head0) { bus_dma_segment_t segs[HN_TX_DATA_SEGCNT_MAX]; int error, nsegs, i; @@ -810,7 +767,7 @@ hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0) #endif tso_info->lso_v2_xmit.tcp_header_offset = 0; tso_info->lso_v2_xmit.mss = m_head->m_pkthdr.tso_segsz; - } else if (m_head->m_pkthdr.csum_flags & sc->hn_csum_assist) { + } else if (m_head->m_pkthdr.csum_flags & txr->hn_csum_assist) { rndis_tcp_ip_csum_info *csum_info; rndis_msg_size += RNDIS_CSUM_PPI_SIZE; @@ -837,8 +794,8 @@ hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0) /* * Chimney send, if the packet could fit into one chimney buffer. 
*/ - if (packet->tot_data_buf_len < sc->hn_tx_chimney_size) { - netvsc_dev *net_dev = sc->net_dev; + if (packet->tot_data_buf_len < txr->hn_tx_chimney_size) { + netvsc_dev *net_dev = txr->hn_sc->net_dev; uint32_t send_buf_section_idx; send_buf_section_idx = @@ -857,12 +814,12 @@ hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0) packet->send_buf_section_size = packet->tot_data_buf_len; packet->page_buf_count = 0; - sc->hn_tx_chimney++; + txr->hn_tx_chimney++; goto done; } } - error = hn_txdesc_dmamap_load(sc, txd, &m_head, segs, &nsegs); + error = hn_txdesc_dmamap_load(txr, txd, &m_head, segs, &nsegs); if (error) { int freed; @@ -872,12 +829,12 @@ hn_encap(struct hn_softc *sc, struct hn_txdesc *txd, struct mbuf **m_head0) m_freem(m_head); *m_head0 = NULL; - freed = hn_txdesc_put(sc, txd); + freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon txdma error")); - sc->hn_txdma_failed++; - if_inc_counter(sc->hn_ifp, IFCOUNTER_OERRORS, 1); + txr->hn_txdma_failed++; + if_inc_counter(txr->hn_sc->hn_ifp, IFCOUNTER_OERRORS, 1); return error; } *m_head0 = m_head; @@ -920,11 +877,15 @@ done: * Start a transmit of one or more packets */ static int -hn_start_locked(struct ifnet *ifp, int len) +hn_start_locked(struct hn_tx_ring *txr, int len) { - struct hn_softc *sc = ifp->if_softc; + struct hn_softc *sc = txr->hn_sc; + struct ifnet *ifp = sc->hn_ifp; struct hv_device *device_ctx = vmbus_get_devctx(sc->hn_dev); + KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); + mtx_assert(&txr->hn_tx_lock, MA_OWNED); + if ((ifp->if_drv_flags & (IFF_DRV_RUNNING | IFF_DRV_OACTIVE)) != IFF_DRV_RUNNING) return 0; @@ -948,15 +909,15 @@ hn_start_locked(struct ifnet *ifp, int len) return 1; } - txd = hn_txdesc_get(sc); + txd = hn_txdesc_get(txr); if (txd == NULL) { - sc->hn_no_txdescs++; + txr->hn_no_txdescs++; IF_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; } - error = hn_encap(sc, txd, 
&m_head); + error = hn_encap(txr, txd, &m_head); if (error) { /* Both txd and m_head are freed */ continue; @@ -971,7 +932,7 @@ again: ETHER_BPF_MTAP(ifp, m_head); if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1); } - hn_txdesc_put(sc, txd); + hn_txdesc_put(txr, txd); if (__predict_false(error)) { int freed; @@ -983,9 +944,9 @@ again: * commands to run? Ask netvsc_channel_rollup() * to kick start later. */ - sc->hn_txeof = 1; + txr->hn_txeof = 1; if (!send_failed) { - sc->hn_send_failed++; + txr->hn_send_failed++; send_failed = 1; /* * Try sending again after set hn_txeof; @@ -1002,11 +963,11 @@ again: * DMA map in hn_txdesc_put(), if it was loaded. */ txd->m = NULL; - freed = hn_txdesc_put(sc, txd); + freed = hn_txdesc_put(txr, txd); KASSERT(freed != 0, ("fail to free txd upon send error")); - sc->hn_send_failed++; + txr->hn_send_failed++; IF_PREPEND(&ifp->if_snd, m_head); atomic_set_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); break; @@ -1384,8 +1345,10 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) } sc->hn_tx_chimney_max = sc->net_dev->send_section_size; - if (sc->hn_tx_chimney_size > sc->hn_tx_chimney_max) - sc->hn_tx_chimney_size = sc->hn_tx_chimney_max; + if (sc->hn_tx_ring[0].hn_tx_chimney_size > + sc->hn_tx_chimney_max) + hn_set_tx_chimney_size(sc, sc->hn_tx_chimney_max); + hn_ifinit_locked(sc); NV_LOCK(sc); @@ -1450,10 +1413,13 @@ hn_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) mask = ifr->ifr_reqcap ^ ifp->if_capenable; if (mask & IFCAP_TXCSUM) { ifp->if_capenable ^= IFCAP_TXCSUM; - if (ifp->if_capenable & IFCAP_TXCSUM) - ifp->if_hwassist |= sc->hn_csum_assist; - else - ifp->if_hwassist &= ~sc->hn_csum_assist; + if (ifp->if_capenable & IFCAP_TXCSUM) { + ifp->if_hwassist |= + sc->hn_tx_ring[0].hn_csum_assist; + } else { + ifp->if_hwassist &= + ~sc->hn_tx_ring[0].hn_csum_assist; + } } if (mask & IFCAP_RXCSUM) @@ -1536,48 +1502,54 @@ static void hn_start(struct ifnet *ifp) { struct hn_softc *sc = ifp->if_softc; + struct hn_tx_ring *txr = 
&sc->hn_tx_ring[0]; - if (sc->hn_sched_tx) + if (txr->hn_sched_tx) goto do_sched; - if (NV_TRYLOCK(sc)) { + if (mtx_trylock(&txr->hn_tx_lock)) { int sched; - sched = hn_start_locked(ifp, sc->hn_direct_tx_size); - NV_UNLOCK(sc); + sched = hn_start_locked(txr, txr->hn_direct_tx_size); + mtx_unlock(&txr->hn_tx_lock); if (!sched) return; } do_sched: - taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task); + taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_start_task); } static void -hn_start_txeof(struct ifnet *ifp) +hn_start_txeof(struct hn_tx_ring *txr) { - struct hn_softc *sc = ifp->if_softc; + struct hn_softc *sc = txr->hn_sc; + struct ifnet *ifp = sc->hn_ifp; + + KASSERT(txr == &sc->hn_tx_ring[0], ("not the first TX ring")); - if (sc->hn_sched_tx) + if (txr->hn_sched_tx) goto do_sched; - if (NV_TRYLOCK(sc)) { + if (mtx_trylock(&txr->hn_tx_lock)) { int sched; atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); - sched = hn_start_locked(ifp, sc->hn_direct_tx_size); - NV_UNLOCK(sc); - if (sched) - taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_start_task); + sched = hn_start_locked(txr, txr->hn_direct_tx_size); + mtx_unlock(&txr->hn_tx_lock); + if (sched) { + taskqueue_enqueue(txr->hn_tx_taskq, + &txr->hn_start_task); + } } else { do_sched: /* * Release the OACTIVE earlier, with the hope, that * others could catch up. The task will clear the - * flag again with the NV_LOCK to avoid possible + * flag again with the hn_tx_lock to avoid possible * races. 
*/ atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); - taskqueue_enqueue(sc->hn_tx_taskq, &sc->hn_txeof_task); + taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_txeof_task); } } @@ -1736,7 +1708,7 @@ hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) struct hn_softc *sc = arg1; int chimney_size, error; - chimney_size = sc->hn_tx_chimney_size; + chimney_size = sc->hn_tx_ring[0].hn_tx_chimney_size; error = sysctl_handle_int(oidp, &chimney_size, 0, req); if (error || req->newptr == NULL) return error; @@ -1744,8 +1716,7 @@ hn_tx_chimney_size_sysctl(SYSCTL_HANDLER_ARGS) if (chimney_size > sc->hn_tx_chimney_max || chimney_size <= 0) return EINVAL; - if (sc->hn_tx_chimney_size != chimney_size) - sc->hn_tx_chimney_size = chimney_size; + hn_set_tx_chimney_size(sc, chimney_size); return 0; } @@ -1801,6 +1772,56 @@ hn_rx_stat_u64_sysctl(SYSCTL_HANDLER_ARGS) return 0; } +static int +hn_tx_stat_ulong_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + int ofs = arg2, i, error; + struct hn_tx_ring *txr; + u_long stat; + + stat = 0; + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { + txr = &sc->hn_tx_ring[i]; + stat += *((u_long *)((uint8_t *)txr + ofs)); + } + + error = sysctl_handle_long(oidp, &stat, 0, req); + if (error || req->newptr == NULL) + return error; + + /* Zero out this stat. 
*/ + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { + txr = &sc->hn_tx_ring[i]; + *((u_long *)((uint8_t *)txr + ofs)) = 0; + } + return 0; +} + +static int +hn_tx_conf_int_sysctl(SYSCTL_HANDLER_ARGS) +{ + struct hn_softc *sc = arg1; + int ofs = arg2, i, error, conf; + struct hn_tx_ring *txr; + + txr = &sc->hn_tx_ring[0]; + conf = *((int *)((uint8_t *)txr + ofs)); + + error = sysctl_handle_int(oidp, &conf, 0, req); + if (error || req->newptr == NULL) + return error; + + NV_LOCK(sc); + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { + txr = &sc->hn_tx_ring[i]; + *((int *)((uint8_t *)txr + ofs)) = conf; + } + NV_UNLOCK(sc); + + return 0; +} + static int hn_check_iplen(const struct mbuf *m, int hoff) { @@ -2019,16 +2040,37 @@ hn_destroy_rx_data(struct hn_softc *sc) } static int -hn_create_tx_ring(struct hn_softc *sc) +hn_create_tx_ring(struct hn_softc *sc, int id) { + struct hn_tx_ring *txr = &sc->hn_tx_ring[id]; bus_dma_tag_t parent_dtag; int error, i; - sc->hn_txdesc_cnt = HN_TX_DESC_CNT; - sc->hn_txdesc = malloc(sizeof(struct hn_txdesc) * sc->hn_txdesc_cnt, + txr->hn_sc = sc; + + mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); + mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); + + txr->hn_txdesc_cnt = HN_TX_DESC_CNT; + txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK | M_ZERO); - SLIST_INIT(&sc->hn_txlist); - mtx_init(&sc->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); + SLIST_INIT(&txr->hn_txlist); + + txr->hn_tx_taskq = sc->hn_tx_taskq; + TASK_INIT(&txr->hn_start_task, 0, hn_start_taskfunc, txr); + TASK_INIT(&txr->hn_txeof_task, 0, hn_txeof_taskfunc, txr); + + txr->hn_direct_tx_size = hn_direct_tx_size; + if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1) + txr->hn_csum_assist = HN_CSUM_ASSIST; + else + txr->hn_csum_assist = HN_CSUM_ASSIST_WIN8; + + /* + * Always schedule transmission instead of trying to do direct + * transmission. This one gives the best performance so far. 
+ */ + txr->hn_sched_tx = 1; parent_dtag = bus_get_dma_tag(sc->hn_dev); @@ -2045,7 +2087,7 @@ hn_create_tx_ring(struct hn_softc *sc) 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ - &sc->hn_tx_rndis_dtag); + &txr->hn_tx_rndis_dtag); if (error) { device_printf(sc->hn_dev, "failed to create rndis dmatag\n"); return error; @@ -2064,21 +2106,21 @@ hn_create_tx_ring(struct hn_softc *sc) 0, /* flags */ NULL, /* lockfunc */ NULL, /* lockfuncarg */ - &sc->hn_tx_data_dtag); + &txr->hn_tx_data_dtag); if (error) { device_printf(sc->hn_dev, "failed to create data dmatag\n"); return error; } - for (i = 0; i < sc->hn_txdesc_cnt; ++i) { - struct hn_txdesc *txd = &sc->hn_txdesc[i]; + for (i = 0; i < txr->hn_txdesc_cnt; ++i) { + struct hn_txdesc *txd = &txr->hn_txdesc[i]; - txd->sc = sc; + txd->txr = txr; /* * Allocate and load RNDIS messages. */ - error = bus_dmamem_alloc(sc->hn_tx_rndis_dtag, + error = bus_dmamem_alloc(txr->hn_tx_rndis_dtag, (void **)&txd->rndis_msg, BUS_DMA_WAITOK | BUS_DMA_COHERENT, &txd->rndis_msg_dmap); @@ -2088,7 +2130,7 @@ hn_create_tx_ring(struct hn_softc *sc) return error; } - error = bus_dmamap_load(sc->hn_tx_rndis_dtag, + error = bus_dmamap_load(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap, txd->rndis_msg, HN_RNDIS_MSG_LEN, hn_dma_map_paddr, &txd->rndis_msg_paddr, @@ -2096,80 +2138,221 @@ hn_create_tx_ring(struct hn_softc *sc) if (error) { device_printf(sc->hn_dev, "failed to load rndis_msg, %d\n", i); - bus_dmamem_free(sc->hn_tx_rndis_dtag, + bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* DMA map for TX data. 
*/ - error = bus_dmamap_create(sc->hn_tx_data_dtag, 0, + error = bus_dmamap_create(txr->hn_tx_data_dtag, 0, &txd->data_dmap); if (error) { device_printf(sc->hn_dev, "failed to allocate tx data dmamap\n"); - bus_dmamap_unload(sc->hn_tx_rndis_dtag, + bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); - bus_dmamem_free(sc->hn_tx_rndis_dtag, + bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); return error; } /* All set, put it to list */ txd->flags |= HN_TXD_FLAG_ONLIST; - SLIST_INSERT_HEAD(&sc->hn_txlist, txd, link); + SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); + } + txr->hn_txdesc_avail = txr->hn_txdesc_cnt; + + if (sc->hn_tx_sysctl_tree != NULL) { + struct sysctl_oid_list *child; + struct sysctl_ctx_list *ctx; + char name[16]; + + /* + * Create per TX ring sysctl tree: + * dev.hn.UNIT.tx.RINGID + */ + ctx = device_get_sysctl_ctx(sc->hn_dev); + child = SYSCTL_CHILDREN(sc->hn_tx_sysctl_tree); + + snprintf(name, sizeof(name), "%d", id); + txr->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, + name, CTLFLAG_RD, 0, ""); + + if (txr->hn_tx_sysctl_tree != NULL) { + child = SYSCTL_CHILDREN(txr->hn_tx_sysctl_tree); + + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_avail", + CTLFLAG_RD, &txr->hn_txdesc_avail, 0, + "# of available TX descs"); + } } - sc->hn_txdesc_avail = sc->hn_txdesc_cnt; return 0; } static void -hn_destroy_tx_ring(struct hn_softc *sc) +hn_destroy_tx_ring(struct hn_tx_ring *txr) { struct hn_txdesc *txd; - while ((txd = SLIST_FIRST(&sc->hn_txlist)) != NULL) { + if (txr->hn_txdesc == NULL) + return; + + while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) { KASSERT(txd->m == NULL, ("still has mbuf installed")); KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped")); - SLIST_REMOVE_HEAD(&sc->hn_txlist, link); + SLIST_REMOVE_HEAD(&txr->hn_txlist, link); - bus_dmamap_unload(sc->hn_tx_rndis_dtag, + bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); - 
bus_dmamem_free(sc->hn_tx_rndis_dtag, + bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, txd->rndis_msg_dmap); - bus_dmamap_destroy(sc->hn_tx_data_dtag, txd->data_dmap); + bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); + } + + if (txr->hn_tx_data_dtag != NULL) + bus_dma_tag_destroy(txr->hn_tx_data_dtag); + if (txr->hn_tx_rndis_dtag != NULL) + bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); + free(txr->hn_txdesc, M_NETVSC); + txr->hn_txdesc = NULL; + + mtx_destroy(&txr->hn_txlist_spin); + mtx_destroy(&txr->hn_tx_lock); +} + +static int +hn_create_tx_data(struct hn_softc *sc) +{ + struct sysctl_oid_list *child; + struct sysctl_ctx_list *ctx; + int i; + + sc->hn_tx_ring_cnt = 1; /* TODO: vRSS */ + sc->hn_tx_ring = malloc(sizeof(struct hn_tx_ring) * sc->hn_tx_ring_cnt, + M_NETVSC, M_WAITOK | M_ZERO); + + ctx = device_get_sysctl_ctx(sc->hn_dev); + child = SYSCTL_CHILDREN(device_get_sysctl_tree(sc->hn_dev)); + + /* Create dev.hn.UNIT.tx sysctl tree */ + sc->hn_tx_sysctl_tree = SYSCTL_ADD_NODE(ctx, child, OID_AUTO, "tx", + CTLFLAG_RD, 0, ""); + + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { + int error; + + error = hn_create_tx_ring(sc, i); + if (error) + return error; } - if (sc->hn_tx_data_dtag != NULL) - bus_dma_tag_destroy(sc->hn_tx_data_dtag); - if (sc->hn_tx_rndis_dtag != NULL) - bus_dma_tag_destroy(sc->hn_tx_rndis_dtag); - free(sc->hn_txdesc, M_NETVSC); - mtx_destroy(&sc->hn_txlist_spin); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "no_txdescs", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_no_txdescs), + hn_tx_stat_ulong_sysctl, "LU", "# of times short of TX descs"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "send_failed", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_send_failed), + hn_tx_stat_ulong_sysctl, "LU", "# of hyper-v sending failure"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "txdma_failed", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_txdma_failed), + hn_tx_stat_ulong_sysctl, 
"LU", "# of TX DMA failure"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_collapsed", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_tx_collapsed), + hn_tx_stat_ulong_sysctl, "LU", "# of TX mbuf collapsed"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney", + CTLTYPE_ULONG | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_tx_chimney), + hn_tx_stat_ulong_sysctl, "LU", "# of chimney send"); + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "txdesc_cnt", + CTLFLAG_RD, &sc->hn_tx_ring[0].hn_txdesc_cnt, 0, + "# of total TX descs"); + SYSCTL_ADD_INT(ctx, child, OID_AUTO, "tx_chimney_max", + CTLFLAG_RD, &sc->hn_tx_chimney_max, 0, + "Chimney send packet size upper boundary"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "tx_chimney_size", + CTLTYPE_INT | CTLFLAG_RW, sc, 0, hn_tx_chimney_size_sysctl, + "I", "Chimney send packet size limit"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "direct_tx_size", + CTLTYPE_INT | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_direct_tx_size), + hn_tx_conf_int_sysctl, "I", + "Size of the packet for direct transmission"); + SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "sched_tx", + CTLTYPE_INT | CTLFLAG_RW, sc, + __offsetof(struct hn_tx_ring, hn_sched_tx), + hn_tx_conf_int_sysctl, "I", + "Always schedule transmission " + "instead of doing direct transmission"); + + return 0; } static void -hn_start_taskfunc(void *xsc, int pending __unused) +hn_set_tx_chimney_size(struct hn_softc *sc, int chimney_size) { - struct hn_softc *sc = xsc; + int i; NV_LOCK(sc); - hn_start_locked(sc->hn_ifp, 0); + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) + sc->hn_tx_ring[i].hn_tx_chimney_size = chimney_size; NV_UNLOCK(sc); } static void -hn_txeof_taskfunc(void *xsc, int pending __unused) +hn_destroy_tx_data(struct hn_softc *sc) { - struct hn_softc *sc = xsc; - struct ifnet *ifp = sc->hn_ifp; + int i; - NV_LOCK(sc); - atomic_clear_int(&ifp->if_drv_flags, IFF_DRV_OACTIVE); - hn_start_locked(ifp, 0); - NV_UNLOCK(sc); + if (sc->hn_tx_ring_cnt == 0) + return; + 
+ for (i = 0; i < sc->hn_tx_ring_cnt; ++i) + hn_destroy_tx_ring(&sc->hn_tx_ring[i]); + + free(sc->hn_tx_ring, M_NETVSC); + sc->hn_tx_ring = NULL; + + sc->hn_tx_ring_cnt = 0; +} + +static void +hn_start_taskfunc(void *xtxr, int pending __unused) +{ + struct hn_tx_ring *txr = xtxr; + + mtx_lock(&txr->hn_tx_lock); + hn_start_locked(txr, 0); + mtx_unlock(&txr->hn_tx_lock); +} + +static void +hn_txeof_taskfunc(void *xtxr, int pending __unused) +{ + struct hn_tx_ring *txr = xtxr; + + mtx_lock(&txr->hn_tx_lock); + atomic_clear_int(&txr->hn_sc->hn_ifp->if_drv_flags, IFF_DRV_OACTIVE); + hn_start_locked(txr, 0); + mtx_unlock(&txr->hn_tx_lock); +} + +static void +hn_stop_tx_tasks(struct hn_softc *sc) +{ + int i; + + for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { + struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; + + taskqueue_drain(txr->hn_tx_taskq, &txr->hn_start_task); + taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); + } } static void -- cgit v1.2.3 From 57fb9b3fd1a4db18cb9f918445ae36726752222a Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Thu, 18 Feb 2016 07:44:14 +0000 Subject: hyperv/hn: Use buf_ring for txdesc list So one spinlock is avoided, which would be potentially dangerous for virtual machine, if the spinlock holder was scheduled out by the host, as noted by royger. Old spinlock based txdesc list is still kept around, so we could have a safe fallback. No performance regression nor improvement is observed. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5290 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 10 ++++ sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 66 +++++++++++++++++++++++---- 2 files changed, 66 insertions(+), 10 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index fa71af236f17..693c584ea5f6 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -58,6 +58,8 @@ #include +#define HN_USE_TXDESC_BUFRING + MALLOC_DECLARE(M_NETVSC); #define NVSP_INVALID_PROTOCOL_VERSION (0xFFFFFFFF) @@ -990,8 +992,12 @@ typedef struct { hv_bool_uint8_t link_state; } netvsc_device_info; +#ifndef HN_USE_TXDESC_BUFRING struct hn_txdesc; SLIST_HEAD(hn_txdesc_list, hn_txdesc); +#else +struct buf_ring; +#endif struct hn_rx_ring { struct lro_ctrl hn_lro; @@ -1012,8 +1018,12 @@ struct hn_rx_ring { #define HN_TRUST_HCSUM_UDP 0x0004 struct hn_tx_ring { +#ifndef HN_USE_TXDESC_BUFRING struct mtx hn_txlist_spin; struct hn_txdesc_list hn_txlist; +#else + struct buf_ring *hn_txdesc_br; +#endif int hn_txdesc_cnt; int hn_txdesc_avail; int hn_txeof; diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 23e43cedae0c..9a2b2f74a4ad 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -70,6 +70,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include @@ -151,7 +152,9 @@ __FBSDID("$FreeBSD$"); #define HN_DIRECT_TX_SIZE_DEF 128 struct hn_txdesc { +#ifndef HN_USE_TXDESC_BUFRING SLIST_ENTRY(hn_txdesc) link; +#endif struct mbuf *m; struct hn_tx_ring *txr; int refs; @@ -258,6 +261,14 @@ SYSCTL_INT(_hw_hn, OID_AUTO, share_tx_taskq, CTLFLAG_RDTUN, static struct taskqueue *hn_tx_taskq; +#ifndef HN_USE_TXDESC_BUFRING +static int hn_use_txdesc_bufring = 0; +#else +static int 
hn_use_txdesc_bufring = 1; +#endif +SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD, + &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors"); + /* * Forward declarations */ @@ -570,6 +581,7 @@ hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) txd->flags |= HN_TXD_FLAG_ONLIST; +#ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); KASSERT(txr->hn_txdesc_avail >= 0 && txr->hn_txdesc_avail < txr->hn_txdesc_cnt, @@ -577,6 +589,10 @@ hn_txdesc_put(struct hn_tx_ring *txr, struct hn_txdesc *txd) txr->hn_txdesc_avail++; SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); mtx_unlock_spin(&txr->hn_txlist_spin); +#else + atomic_add_int(&txr->hn_txdesc_avail, 1); + buf_ring_enqueue(txr->hn_txdesc_br, txd); +#endif return 1; } @@ -586,6 +602,7 @@ hn_txdesc_get(struct hn_tx_ring *txr) { struct hn_txdesc *txd; +#ifndef HN_USE_TXDESC_BUFRING mtx_lock_spin(&txr->hn_txlist_spin); txd = SLIST_FIRST(&txr->hn_txlist); if (txd != NULL) { @@ -595,8 +612,14 @@ hn_txdesc_get(struct hn_tx_ring *txr) SLIST_REMOVE_HEAD(&txr->hn_txlist, link); } mtx_unlock_spin(&txr->hn_txlist_spin); +#else + txd = buf_ring_dequeue_sc(txr->hn_txdesc_br); +#endif if (txd != NULL) { +#ifdef HN_USE_TXDESC_BUFRING + atomic_subtract_int(&txr->hn_txdesc_avail, 1); +#endif KASSERT(txd->m == NULL && txd->refs == 0 && (txd->flags & HN_TXD_FLAG_ONLIST), ("invalid txd")); txd->flags &= ~HN_TXD_FLAG_ONLIST; @@ -2048,13 +2071,20 @@ hn_create_tx_ring(struct hn_softc *sc, int id) txr->hn_sc = sc; +#ifndef HN_USE_TXDESC_BUFRING mtx_init(&txr->hn_txlist_spin, "hn txlist", NULL, MTX_SPIN); +#endif mtx_init(&txr->hn_tx_lock, "hn tx", NULL, MTX_DEF); txr->hn_txdesc_cnt = HN_TX_DESC_CNT; txr->hn_txdesc = malloc(sizeof(struct hn_txdesc) * txr->hn_txdesc_cnt, M_NETVSC, M_WAITOK | M_ZERO); +#ifndef HN_USE_TXDESC_BUFRING SLIST_INIT(&txr->hn_txlist); +#else + txr->hn_txdesc_br = buf_ring_alloc(txr->hn_txdesc_cnt, M_NETVSC, + M_WAITOK, &txr->hn_tx_lock); +#endif txr->hn_tx_taskq = sc->hn_tx_taskq; 
TASK_INIT(&txr->hn_start_task, 0, hn_start_taskfunc, txr); @@ -2158,7 +2188,11 @@ hn_create_tx_ring(struct hn_softc *sc, int id) /* All set, put it to list */ txd->flags |= HN_TXD_FLAG_ONLIST; +#ifndef HN_USE_TXDESC_BUFRING SLIST_INSERT_HEAD(&txr->hn_txlist, txd, link); +#else + buf_ring_enqueue(txr->hn_txdesc_br, txd); +#endif } txr->hn_txdesc_avail = txr->hn_txdesc_cnt; @@ -2190,6 +2224,20 @@ hn_create_tx_ring(struct hn_softc *sc, int id) return 0; } +static void +hn_txdesc_dmamap_destroy(struct hn_txdesc *txd) +{ + struct hn_tx_ring *txr = txd->txr; + + KASSERT(txd->m == NULL, ("still has mbuf installed")); + KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, ("still dma mapped")); + + bus_dmamap_unload(txr->hn_tx_rndis_dtag, txd->rndis_msg_dmap); + bus_dmamem_free(txr->hn_tx_rndis_dtag, txd->rndis_msg, + txd->rndis_msg_dmap); + bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); +} + static void hn_destroy_tx_ring(struct hn_tx_ring *txr) { @@ -2198,19 +2246,15 @@ hn_destroy_tx_ring(struct hn_tx_ring *txr) if (txr->hn_txdesc == NULL) return; +#ifndef HN_USE_TXDESC_BUFRING while ((txd = SLIST_FIRST(&txr->hn_txlist)) != NULL) { - KASSERT(txd->m == NULL, ("still has mbuf installed")); - KASSERT((txd->flags & HN_TXD_FLAG_DMAMAP) == 0, - ("still dma mapped")); SLIST_REMOVE_HEAD(&txr->hn_txlist, link); - - bus_dmamap_unload(txr->hn_tx_rndis_dtag, - txd->rndis_msg_dmap); - bus_dmamem_free(txr->hn_tx_rndis_dtag, - txd->rndis_msg, txd->rndis_msg_dmap); - - bus_dmamap_destroy(txr->hn_tx_data_dtag, txd->data_dmap); + hn_txdesc_dmamap_destroy(txd); } +#else + while ((txd = buf_ring_dequeue_sc(txr->hn_txdesc_br)) != NULL) + hn_txdesc_dmamap_destroy(txd); +#endif if (txr->hn_tx_data_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_data_dtag); @@ -2219,7 +2263,9 @@ hn_destroy_tx_ring(struct hn_tx_ring *txr) free(txr->hn_txdesc, M_NETVSC); txr->hn_txdesc = NULL; +#ifndef HN_USE_TXDESC_BUFRING mtx_destroy(&txr->hn_txlist_spin); +#endif mtx_destroy(&txr->hn_tx_lock); } -- cgit 
v1.2.3 From cd1832fe92812ca2b8b8c4b26840806a31023baf Mon Sep 17 00:00:00 2001 From: Thomas Quinot Date: Thu, 18 Feb 2016 08:44:16 +0000 Subject: Reorganize the handling all-zeroes terminal block in sparse mode The intent of the previous code in that case was to force an explicit write, but the implementation was incorrect, and as a result the write was never performed. This new implementation instead uses ftruncate(2) to extend the file with a trailing hole. Also introduce regression tests for these cases. PR: 189284 (original PR whose fix introduced this bug) PR: 207092 Differential Revision: D5248 Reviewed by: sobomax,kib MFC after: 2 weeks --- bin/dd/Makefile | 13 ++++++++++++- bin/dd/dd.c | 36 +++++++++++++++++++++--------------- bin/dd/dd.h | 1 + bin/dd/gen.c | 13 ++++++++++--- bin/dd/ref.obs_zeroes | 3 +++ 5 files changed, 47 insertions(+), 19 deletions(-) create mode 100644 bin/dd/ref.obs_zeroes diff --git a/bin/dd/Makefile b/bin/dd/Makefile index 5f07dbc248ac..6b17d290c6ee 100644 --- a/bin/dd/Makefile +++ b/bin/dd/Makefile @@ -24,7 +24,18 @@ test: ${PROG} gen LC_ALL=en_US.US-ASCII hexdump -C | \ diff -I FreeBSD - ${.CURDIR}/ref.${conv} .endfor - @rm -f gen + @${ECHO} "testing sparse file (obs zeroes)" + @./gen 189284 | ./dd ibs=16 obs=8 conv=sparse of=obs_zeroes 2> /dev/null + @hexdump -C obs_zeroes | diff -I FreeBSD - ${.CURDIR}/ref.obs_zeroes + + @${ECHO} "testing spase file (all zeroes)" + @./dd if=/dev/zero of=1M_zeroes bs=1048576 count=1 2> /dev/null + @./dd if=1M_zeroes of=1M_zeroes.1 bs=1048576 conv=sparse 2> /dev/null + @./dd if=1M_zeroes of=1M_zeroes.2 bs=1048576 2> /dev/null + @diff 1M_zeroes 1M_zeroes.1 + @diff 1M_zeroes 1M_zeroes.2 + + @rm -f gen 1M_zeroes* obs_zeroes .if ${MK_TESTS} != "no" SUBDIR+= tests diff --git a/bin/dd/dd.c b/bin/dd/dd.c index 8ae11a7d1636..4c31a5e81260 100644 --- a/bin/dd/dd.c +++ b/bin/dd/dd.c @@ -77,7 +77,6 @@ STAT st; /* statistics */ void (*cfunc)(void); /* conversion function */ uintmax_t cpy_cnt; /* # of blocks to 
copy */ static off_t pending = 0; /* pending seek if sparse */ -static off_t last_sp = 0; /* size of last added sparse block */ u_int ddflags = 0; /* conversion options */ size_t cbsz; /* conversion block size */ uintmax_t files_cnt = 1; /* # of files to copy */ @@ -409,6 +408,15 @@ dd_close(void) } if (out.dbcnt || pending) dd_out(1); + + /* + * If the file ends with a hole, ftruncate it to extend its size + * up to the end of the hole (without having to write any data). + */ + if (out.seek_offset > 0 && (out.flags & ISTRUNC)) { + if (ftruncate(out.fd, out.seek_offset) == -1) + err(1, "truncating %s", out.name); + } } void @@ -457,29 +465,27 @@ dd_out(int force) } if (sparse && !force) { pending += cnt; - last_sp = cnt; nw = cnt; } else { if (pending != 0) { - /* If forced to write, and we have no - * data left, we need to write the last - * sparse block explicitly. + /* + * Seek past hole. Note that we need to record the + * reached offset, because we might have no more data + * to write, in which case we'll need to call + * ftruncate to extend the file size. 
*/ - if (force && cnt == 0) { - pending -= last_sp; - assert(outp == out.db); - memset(outp, 0, cnt); - } - if (lseek(out.fd, pending, SEEK_CUR) == - -1) + out.seek_offset = lseek(out.fd, pending, SEEK_CUR); + if (out.seek_offset == -1) err(2, "%s: seek error creating sparse file", out.name); - pending = last_sp = 0; + pending = 0; } - if (cnt) + if (cnt) { nw = write(out.fd, outp, cnt); - else + out.seek_offset = 0; + } else { return; + } } if (nw <= 0) { diff --git a/bin/dd/dd.h b/bin/dd/dd.h index a8b45e594776..196f804c2ebe 100644 --- a/bin/dd/dd.h +++ b/bin/dd/dd.h @@ -54,6 +54,7 @@ typedef struct { const char *name; /* name */ int fd; /* file descriptor */ off_t offset; /* # of blocks to skip */ + off_t seek_offset; /* offset of last seek past output hole */ } IO; typedef struct { diff --git a/bin/dd/gen.c b/bin/dd/gen.c index 9c7571a9ab91..d53d8fb2b5ac 100644 --- a/bin/dd/gen.c +++ b/bin/dd/gen.c @@ -5,13 +5,20 @@ */ #include +#include int -main(int argc __unused, char **argv __unused) +main(int argc, char **argv) { int i; - for (i = 0; i < 256; i++) - putchar(i); + if (argc > 1 && !strcmp(argv[1], "189284")) { + fputs("ABCDEFGH", stdout); + for (i = 0; i < 8; i++) + putchar(0); + } else { + for (i = 0; i < 256; i++) + putchar(i); + } return (0); } diff --git a/bin/dd/ref.obs_zeroes b/bin/dd/ref.obs_zeroes new file mode 100644 index 000000000000..473ff7cc4108 --- /dev/null +++ b/bin/dd/ref.obs_zeroes @@ -0,0 +1,3 @@ +$FreeBSD$ +00000000 41 42 43 44 45 46 47 48 00 00 00 00 00 00 00 00 |ABCDEFGH........| +00000010 -- cgit v1.2.3 From ac9a695a93cff2527aa22cd2a421b01d5faa710f Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Thu, 18 Feb 2016 09:26:58 +0000 Subject: Remove unneeded definitions after r291406. Also remove redundant and not used L1_ADDR_BITS definition. 
--- sys/arm/include/pte-v6.h | 22 ---------------------- sys/arm/include/pte.h | 2 -- 2 files changed, 24 deletions(-) diff --git a/sys/arm/include/pte-v6.h b/sys/arm/include/pte-v6.h index c0eb4173360e..67484ccf1681 100644 --- a/sys/arm/include/pte-v6.h +++ b/sys/arm/include/pte-v6.h @@ -296,33 +296,11 @@ */ #define AP_KRW 0x01 /* kernel read/write */ -/* - * lib/libkvm/kvm_arm.c - */ -#define L1_ADDR_MASK 0xfffffc00 - /* * lib/libkvm/kvm_arm.c */ #define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ -#ifndef LOCORE -/* - * sys/arm/arm/minidump_machdep.c - * sys/arm/arm/pmap.c - * sys/arm/arm/pmap.h (hack for our hack in pmap.h ) - * lib/libkvm/kvm_arm.c - */ -typedef uint32_t pd_entry_t; /* page directory entry */ - -/* - * sys/arm/arm/minidump_machdep.c - * sys/arm/arm/pmap.c - * sys/arm/arm/pmap.h (hack for our hack in pmap.h ) - * sys/arm/include/param.h - */ -typedef uint32_t pt_entry_t; /* page table entry */ -#endif // ----------------------------------------------------------------------------- #endif /* !_MACHINE_PTE_H_ */ diff --git a/sys/arm/include/pte.h b/sys/arm/include/pte.h index 8c26aabe5ce7..e65242b1e730 100644 --- a/sys/arm/include/pte.h +++ b/sys/arm/include/pte.h @@ -73,7 +73,6 @@ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ #define L2_INVAL 0x00 /* L2 invalid type */ /* L1 and L2 address masks */ -#define L1_ADDR_MASK 0xfffffc00 #define L2_ADDR_MASK 0xfffff000 /* @@ -152,7 +151,6 @@ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 * table. */ -#define L1_ADDR_BITS 0xfff00000 /* L1 PTE address bits */ #define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ #define L1_TABLE_SIZE 0x4000 /* 16K */ -- cgit v1.2.3 From 9395e5a63419b47f2094f96628ba99c42126b625 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Thu, 18 Feb 2016 09:28:16 +0000 Subject: Remove redundant L2_ADDR_MASK definition and replace it by primary one. 
--- sys/arm/arm/pmap.c | 2 +- sys/arm/include/pte.h | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c index 99a39a0ee522..529e9f1d4518 100644 --- a/sys/arm/arm/pmap.c +++ b/sys/arm/arm/pmap.c @@ -2549,7 +2549,7 @@ pmap_remove_pages(pmap_t pmap) l2b = pmap_get_l2_bucket(pmap, pv->pv_va); KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - m = PHYS_TO_VM_PAGE(*pt & L2_ADDR_MASK); + m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME); KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); *pt = 0; PTE_SYNC(pt); diff --git a/sys/arm/include/pte.h b/sys/arm/include/pte.h index e65242b1e730..3402454aac83 100644 --- a/sys/arm/include/pte.h +++ b/sys/arm/include/pte.h @@ -72,9 +72,6 @@ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ #define L2_MASK 0x03 /* Mask for L2 entry type */ #define L2_INVAL 0x00 /* L2 invalid type */ -/* L1 and L2 address masks */ -#define L2_ADDR_MASK 0xfffff000 - /* * The ARM MMU architecture was introduced with ARM v3 (previous ARM * architecture versions used an optional off-CPU memory controller -- cgit v1.2.3 From d97d068fe87e53dddf99d2f8aae739b0c9fb6e86 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Thu, 18 Feb 2016 09:30:04 +0000 Subject: Remove redundant ARM_L2_ADDR_BITS and L2_ADDR_BITS definitions and replace them by primary ones where needed. 
--- lib/libkvm/kvm_arm.c | 2 +- lib/libkvm/kvm_arm.h | 3 --- sys/arm/include/pmap.h | 2 +- sys/arm/include/pte-v6.h | 5 ----- sys/arm/include/pte.h | 2 -- 5 files changed, 2 insertions(+), 12 deletions(-) diff --git a/lib/libkvm/kvm_arm.c b/lib/libkvm/kvm_arm.c index bbead35ec741..ae5fb5bc8646 100644 --- a/lib/libkvm/kvm_arm.c +++ b/lib/libkvm/kvm_arm.c @@ -183,7 +183,7 @@ _arm_initvtop(kvm_t *kd) #define l1pte_section_p(pde) (((pde) & ARM_L1_TYPE_MASK) == ARM_L1_TYPE_S) #define l1pte_valid(pde) ((pde) != 0) #define l2pte_valid(pte) ((pte) != 0) -#define l2pte_index(v) (((v) & ARM_L2_ADDR_BITS) >> ARM_L2_S_SHIFT) +#define l2pte_index(v) (((v) & ARM_L1_S_OFFSET) >> ARM_L2_S_SHIFT) static int diff --git a/lib/libkvm/kvm_arm.h b/lib/libkvm/kvm_arm.h index 096a6b1fbc6a..404d63a54293 100644 --- a/lib/libkvm/kvm_arm.h +++ b/lib/libkvm/kvm_arm.h @@ -72,8 +72,6 @@ typedef uint32_t arm_pt_entry_t; #define ARM_L2_TYPE_T 0x03 /* Tiny Page - 1k - not used */ #define ARM_L2_TYPE_MASK 0x03 -#define ARM_L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ - #ifdef __arm__ #include @@ -106,7 +104,6 @@ _Static_assert(L2_TYPE_S == ARM_L2_TYPE_S, "L2_TYPE_S mismatch"); _Static_assert(L2_TYPE_T == ARM_L2_TYPE_T, "L2_TYPE_T mismatch"); #endif _Static_assert(L2_TYPE_MASK == ARM_L2_TYPE_MASK, "L2_TYPE_MASK mismatch"); -_Static_assert(L2_ADDR_BITS == ARM_L2_ADDR_BITS, "L2_ADDR_BITS mismatch"); #endif int _arm_native(kvm_t *); diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h index 8222652084b4..8372929cb010 100644 --- a/sys/arm/include/pmap.h +++ b/sys/arm/include/pmap.h @@ -489,7 +489,7 @@ void pmap_use_minicache(vm_offset_t, vm_size_t); #define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) #define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) -#define l2pte_index(v) (((v) & L2_ADDR_BITS) >> L2_S_SHIFT) +#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) #define l2pte_valid(pte) ((pte) != 0) #define l2pte_pa(pte) ((pte) & L2_S_FRAME) #define 
l2pte_minidata(pte) (((pte) & \ diff --git a/sys/arm/include/pte-v6.h b/sys/arm/include/pte-v6.h index 67484ccf1681..9febb79e2e1e 100644 --- a/sys/arm/include/pte-v6.h +++ b/sys/arm/include/pte-v6.h @@ -296,11 +296,6 @@ */ #define AP_KRW 0x01 /* kernel read/write */ -/* - * lib/libkvm/kvm_arm.c - */ -#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ - // ----------------------------------------------------------------------------- #endif /* !_MACHINE_PTE_H_ */ diff --git a/sys/arm/include/pte.h b/sys/arm/include/pte.h index 3402454aac83..c83ed2fbbc25 100644 --- a/sys/arm/include/pte.h +++ b/sys/arm/include/pte.h @@ -148,8 +148,6 @@ typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 * table. */ -#define L2_ADDR_BITS 0x000ff000 /* L2 PTE address bits */ - #define L1_TABLE_SIZE 0x4000 /* 16K */ #define L2_TABLE_SIZE 0x1000 /* 4K */ /* -- cgit v1.2.3 From 98bc9384c5bef1ecd0de5cc30c1aa4c8337551a2 Mon Sep 17 00:00:00 2001 From: Wojciech Macek Date: Thu, 18 Feb 2016 11:26:08 +0000 Subject: Fix ThunderX external PEM bus offset Obtained from: Semihalf Sponsored by: Cavium Approved by: cognet (mentor) Reviewed by: zbb Differential revision: https://reviews.freebsd.org/D5293 --- sys/arm64/arm64/gic_v3_its.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/arm64/arm64/gic_v3_its.c b/sys/arm64/arm64/gic_v3_its.c index 7a547c1cd70b..c847f958493c 100644 --- a/sys/arm64/arm64/gic_v3_its.c +++ b/sys/arm64/arm64/gic_v3_its.c @@ -59,7 +59,7 @@ __FBSDID("$FreeBSD$"); #include "gic_v3_reg.h" #include "gic_v3_var.h" -#define GIC_V3_ITS_QUIRK_THUNDERX_PEM_BUS_OFFSET 144 +#define GIC_V3_ITS_QUIRK_THUNDERX_PEM_BUS_OFFSET 88 #include "pic_if.h" -- cgit v1.2.3 From e2d4f32f4e7aaadd71f61682fe45b311e8c6dce0 Mon Sep 17 00:00:00 2001 From: Zbigniew Bodek Date: Thu, 18 Feb 2016 11:53:57 +0000 Subject: Fix bug in ofwbus_release_resource() for non-ofwbus descendants Resource list for devices that 
are not ofwbus descendants, but got to ofwbus method via bus_generic_release_resource() call chain, cannot be found using BUS_GET_RESOURCE_LIST() used by ofwbus. In that case, changing device's resource list should be avoided (will not contain resource list prepared by ofw or simplebus). Pointy-hat to: zbb Reviewed by: wma Obtained from: Semihalf Sponsored by: Cavium Differential Revision: https://reviews.freebsd.org/D5304 --- sys/dev/ofw/ofwbus.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/sys/dev/ofw/ofwbus.c b/sys/dev/ofw/ofwbus.c index 142ee1aa3706..23e604a47dad 100644 --- a/sys/dev/ofw/ofwbus.c +++ b/sys/dev/ofw/ofwbus.c @@ -271,12 +271,17 @@ ofwbus_release_resource(device_t bus, device_t child, int type, int rid, struct resource *r) { struct resource_list_entry *rle; + int passthrough; int error; - /* Clean resource list entry */ - rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), type, rid); - if (rle != NULL) - rle->res = NULL; + passthrough = (device_get_parent(child) != bus); + if (!passthrough) { + /* Clean resource list entry */ + rle = resource_list_find(BUS_GET_RESOURCE_LIST(bus, child), + type, rid); + if (rle != NULL) + rle->res = NULL; + } if ((rman_get_flags(r) & RF_ACTIVE) != 0) { error = bus_deactivate_resource(child, type, rid, r); -- cgit v1.2.3 From b998c9656bd16ec804af2030769c1406e01942df Mon Sep 17 00:00:00 2001 From: Zbigniew Bodek Date: Thu, 18 Feb 2016 13:00:04 +0000 Subject: Introduce bus_get_bus_tag() method Provide bus_get_bus_tag() for sparc64, powerpc, arm, arm64 and mips nexus and its children in order to return a platform specific default tag. This is required to ensure generic correctness of the bus_space tag. It is especially needed for arches where child bus tag does not match the parent bus tag. This solves the problem with ppc architecture where the PCI bus tag differs from parent bus tag which is big-endian. 
This commit is a part of the following patch: https://reviews.freebsd.org/D4879 Submitted by: Marcin Mazurek Obtained from: Semihalf Sponsored by: Annapurna Labs Reviewed by: jhibbits, mmel Differential Revision: https://reviews.freebsd.org/D4879 --- sys/arm/arm/nexus.c | 13 +++++++++++++ sys/arm64/arm64/nexus.c | 9 +++++++++ sys/kern/bus_if.m | 11 +++++++++++ sys/kern/subr_bus.c | 33 +++++++++++++++++++++++++++++++++ sys/powerpc/powerpc/nexus.c | 9 +++++++++ sys/sparc64/sparc64/nexus.c | 9 +++++++++ sys/sys/bus.h | 4 ++++ 7 files changed, 88 insertions(+) diff --git a/sys/arm/arm/nexus.c b/sys/arm/arm/nexus.c index 97e9e2595335..b1c6b089e6bc 100644 --- a/sys/arm/arm/nexus.c +++ b/sys/arm/arm/nexus.c @@ -85,6 +85,7 @@ static struct resource *nexus_alloc_resource(device_t, device_t, int, int *, rman_res_t, rman_res_t, rman_res_t, u_int); static int nexus_activate_resource(device_t, device_t, int, int, struct resource *); +static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef ARM_INTRNG #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); @@ -124,6 +125,7 @@ static device_method_t nexus_methods[] = { DEVMETHOD(bus_release_resource, nexus_release_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), + DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), #ifdef ARM_INTRNG DEVMETHOD(bus_describe_intr, nexus_describe_intr), #ifdef SMP @@ -260,6 +262,17 @@ nexus_release_resource(device_t bus, device_t child, int type, int rid, return (rman_release_resource(res)); } +static bus_space_tag_t +nexus_get_bus_tag(device_t bus __unused, device_t child __unused) +{ + +#ifdef FDT + return(fdtbus_bs_tag); +#else + return((void *)1); +#endif +} + static int nexus_config_intr(device_t dev, int irq, enum intr_trigger trig, enum intr_polarity pol) diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c index 611addd9ad4b..c56c7aa83439 100644 --- a/sys/arm64/arm64/nexus.c +++ 
b/sys/arm64/arm64/nexus.c @@ -113,6 +113,7 @@ static int nexus_deactivate_resource(device_t, device_t, int, int, static int nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags, driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep); static int nexus_teardown_intr(device_t, device_t, struct resource *, void *); +static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef SMP static int nexus_bind_intr(device_t, device_t, struct resource *, int); #endif @@ -134,6 +135,7 @@ static device_method_t nexus_methods[] = { DEVMETHOD(bus_deactivate_resource, nexus_deactivate_resource), DEVMETHOD(bus_setup_intr, nexus_setup_intr), DEVMETHOD(bus_teardown_intr, nexus_teardown_intr), + DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), #ifdef SMP DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif @@ -307,6 +309,13 @@ nexus_bind_intr(device_t dev, device_t child, struct resource *irq, int cpu) } #endif +static bus_space_tag_t +nexus_get_bus_tag(device_t bus __unused, device_t child __unused) +{ + + return(&memmap_bus); +} + static int nexus_activate_resource(device_t bus, device_t child, int type, int rid, struct resource *r) diff --git a/sys/kern/bus_if.m b/sys/kern/bus_if.m index e55b1ce9ea6b..8acadd7c10b7 100644 --- a/sys/kern/bus_if.m +++ b/sys/kern/bus_if.m @@ -636,6 +636,17 @@ METHOD bus_dma_tag_t get_dma_tag { device_t _child; } DEFAULT bus_generic_get_dma_tag; +/** + * @brief Returns bus_space_tag_t for use w/ devices on the bus. + * + * @param _dev the parent device of @p _child + * @param _child the device to which the tag will belong + */ +METHOD bus_space_tag_t get_bus_tag { + device_t _dev; + device_t _child; +} DEFAULT bus_generic_get_bus_tag; + /** * @brief Allow the bus to determine the unit number of a device. 
* diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 6aca991954c3..3e4568b58e66 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -4094,6 +4094,22 @@ bus_generic_get_dma_tag(device_t dev, device_t child) return (NULL); } +/** + * @brief Helper function for implementing BUS_GET_BUS_TAG(). + * + * This simple implementation of BUS_GET_BUS_TAG() simply calls the + * BUS_GET_BUS_TAG() method of the parent of @p dev. + */ +bus_space_tag_t +bus_generic_get_bus_tag(device_t dev, device_t child) +{ + + /* Propagate up the bus hierarchy until someone handles it. */ + if (dev->parent != NULL) + return (BUS_GET_BUS_TAG(dev->parent, child)); + return (NULL); +} + /** * @brief Helper function for implementing BUS_GET_RESOURCE(). * @@ -4575,6 +4591,23 @@ bus_get_dma_tag(device_t dev) return (BUS_GET_DMA_TAG(parent, dev)); } +/** + * @brief Wrapper function for BUS_GET_BUS_TAG(). + * + * This function simply calls the BUS_GET_BUS_TAG() method of the + * parent of @p dev. + */ +bus_space_tag_t +bus_get_bus_tag(device_t dev) +{ + device_t parent; + + parent = device_get_parent(dev); + if (parent == NULL) + return (NULL); + return (BUS_GET_BUS_TAG(parent, dev)); +} + /** * @brief Wrapper function for BUS_GET_DOMAIN(). 
* diff --git a/sys/powerpc/powerpc/nexus.c b/sys/powerpc/powerpc/nexus.c index dff21f804146..1ad118c6daaf 100644 --- a/sys/powerpc/powerpc/nexus.c +++ b/sys/powerpc/powerpc/nexus.c @@ -66,6 +66,7 @@ static bus_setup_intr_t nexus_setup_intr; static bus_teardown_intr_t nexus_teardown_intr; static bus_activate_resource_t nexus_activate_resource; static bus_deactivate_resource_t nexus_deactivate_resource; +static bus_space_tag_t nexus_get_bus_tag(device_t, device_t); #ifdef SMP static bus_bind_intr_t nexus_bind_intr; #endif @@ -87,6 +88,7 @@ static device_method_t nexus_methods[] = { DEVMETHOD(bus_bind_intr, nexus_bind_intr), #endif DEVMETHOD(bus_config_intr, nexus_config_intr), + DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), /* ofw_bus interface */ DEVMETHOD(ofw_bus_map_intr, nexus_ofw_map_intr), @@ -155,6 +157,13 @@ nexus_teardown_intr(device_t bus __unused, device_t child __unused, return (powerpc_teardown_intr(ih)); } +static bus_space_tag_t +nexus_get_bus_tag(device_t bus __unused, device_t child __unused) +{ + + return(&bs_be_tag); +} + #ifdef SMP static int nexus_bind_intr(device_t bus __unused, device_t child __unused, diff --git a/sys/sparc64/sparc64/nexus.c b/sys/sparc64/sparc64/nexus.c index 708222060742..67a954b6e6fe 100644 --- a/sys/sparc64/sparc64/nexus.c +++ b/sys/sparc64/sparc64/nexus.c @@ -98,6 +98,7 @@ static bus_bind_intr_t nexus_bind_intr; #endif static bus_describe_intr_t nexus_describe_intr; static bus_get_dma_tag_t nexus_get_dma_tag; +static bus_get_bus_tag_t nexus_get_bus_tag; static ofw_bus_get_devinfo_t nexus_get_devinfo; static int nexus_inlist(const char *, const char *const *); @@ -135,6 +136,7 @@ static device_method_t nexus_methods[] = { #endif DEVMETHOD(bus_describe_intr, nexus_describe_intr), DEVMETHOD(bus_get_dma_tag, nexus_get_dma_tag), + DEVMETHOD(bus_get_bus_tag, nexus_get_bus_tag), /* ofw_bus interface */ DEVMETHOD(ofw_bus_get_devinfo, nexus_get_devinfo), @@ -502,6 +504,13 @@ nexus_get_dma_tag(device_t bus __unused, device_t child 
__unused) return (&nexus_dmatag); } +static bus_space_tag_t +nexus_get_bus_tag(device_t bus __unused, device_t child __unused) +{ + + return (&nexus_bustag); +} + static const struct ofw_bus_devinfo * nexus_get_devinfo(device_t bus __unused, device_t child) { diff --git a/sys/sys/bus.h b/sys/sys/bus.h index 4348ae7e304b..15f5c0674d7d 100644 --- a/sys/sys/bus.h +++ b/sys/sys/bus.h @@ -30,6 +30,7 @@ #define _SYS_BUS_H_ #include +#include #include #include @@ -383,6 +384,8 @@ int bus_generic_detach(device_t dev); void bus_generic_driver_added(device_t dev, driver_t *driver); bus_dma_tag_t bus_generic_get_dma_tag(device_t dev, device_t child); +bus_space_tag_t + bus_generic_get_bus_tag(device_t dev, device_t child); int bus_generic_get_domain(device_t dev, device_t child, int *domain); struct resource_list * bus_generic_get_resource_list (device_t, device_t); @@ -448,6 +451,7 @@ int bus_activate_resource(device_t dev, int type, int rid, int bus_deactivate_resource(device_t dev, int type, int rid, struct resource *r); bus_dma_tag_t bus_get_dma_tag(device_t dev); +bus_space_tag_t bus_get_bus_tag(device_t dev); int bus_get_domain(device_t dev, int *domain); int bus_release_resource(device_t dev, int type, int rid, struct resource *r); -- cgit v1.2.3 From a259e55bb9f2e6c206747579a6e4a94bf36e4c5c Mon Sep 17 00:00:00 2001 From: Zbigniew Bodek Date: Thu, 18 Feb 2016 13:07:21 +0000 Subject: Extract common code from PowerPC's ofw_pci Import portions of the PowerPC OF PCI implementation into new file "ofw_pci.c", common for other platforms. The files ofw_pci.c and ofw_pci.h from sys/powerpc/ofw no longer exist. All required declarations are moved to sys/dev/ofw/ofw_pci.h. This creates a new ofw_pci_write_ivar() function and modifies ofw_pci_nranges(), ofw_pci_read_ivar(), ofw_pci_route_interrupt() methods. Most functions contain existing ppc implementations in the majority unchanged. Now there is no need to have multiple identical copies of methods for various architectures. 
Submitted by: Marcin Mazurek Obtained from: Semihalf Sponsored by: Annapurna Labs Reviewed by: jhibbits, mmel Differential Revision: https://reviews.freebsd.org/D4879 --- sys/conf/files | 1 + sys/dev/ofw/ofw_pci.c | 622 +++++++++++++++++++++++++++++++++++++ sys/dev/ofw/ofw_pci.h | 57 +++- sys/dev/ofw/ofw_subr.c | 3 +- sys/powerpc/mpc85xx/pci_mpc85xx.c | 4 +- sys/powerpc/powermac/cpcht.c | 3 +- sys/powerpc/powermac/grackle.c | 3 +- sys/powerpc/powermac/uninorthpci.c | 3 +- sys/powerpc/powermac/uninorthvar.h | 1 - sys/powerpc/pseries/rtas_pci.c | 3 +- 10 files changed, 684 insertions(+), 16 deletions(-) create mode 100644 sys/dev/ofw/ofw_pci.c diff --git a/sys/conf/files b/sys/conf/files index bbf971319e09..6ecc83c8d792 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -2108,6 +2108,7 @@ dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt +dev/ofw/ofw_pci.c optional fdt pci dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" diff --git a/sys/dev/ofw/ofw_pci.c b/sys/dev/ofw/ofw_pci.c new file mode 100644 index 000000000000..72958db5d22d --- /dev/null +++ b/sys/dev/ofw/ofw_pci.c @@ -0,0 +1,622 @@ +/*- + * Copyright (c) 2011 Nathan Whitehorn + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. 
+ * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ + +#include +__FBSDID("$FreeBSD$"); +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#include +#include + +#include +#include +#include + +#include +#include + +#include "pcib_if.h" + +/* + * If it is necessary to set another value of this for + * some platforms it should be set at fdt.h file + */ +#ifndef PCI_MAP_INTR +#define PCI_MAP_INTR 4 +#endif + +#define PCI_INTR_PINS 4 + +/* + * bus interface. 
+ */ +static struct resource * ofw_pci_alloc_resource(device_t, device_t, + int, int *, u_long, u_long, u_long, u_int); +static int ofw_pci_release_resource(device_t, device_t, int, int, + struct resource *); +static int ofw_pci_activate_resource(device_t, device_t, int, int, + struct resource *); +static int ofw_pci_deactivate_resource(device_t, device_t, int, int, + struct resource *); +static int ofw_pci_adjust_resource(device_t, device_t, int, + struct resource *, u_long, u_long); + +/* + * pcib interface + */ +static int ofw_pci_maxslots(device_t); + +/* + * ofw_bus interface + */ +static phandle_t ofw_pci_get_node(device_t, device_t); + +/* + * local methods + */ +static int ofw_pci_fill_ranges(phandle_t, struct ofw_pci_range *); + +/* + * Driver methods. + */ +static device_method_t ofw_pci_methods[] = { + + /* Device interface */ + DEVMETHOD(device_attach, ofw_pci_attach), + + /* Bus interface */ + DEVMETHOD(bus_print_child, bus_generic_print_child), + DEVMETHOD(bus_read_ivar, ofw_pci_read_ivar), + DEVMETHOD(bus_write_ivar, ofw_pci_write_ivar), + DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), + DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), + DEVMETHOD(bus_alloc_resource, ofw_pci_alloc_resource), + DEVMETHOD(bus_release_resource, ofw_pci_release_resource), + DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource), + DEVMETHOD(bus_deactivate_resource, ofw_pci_deactivate_resource), + DEVMETHOD(bus_adjust_resource, ofw_pci_adjust_resource), +#ifdef __powerpc__ + DEVMETHOD(bus_get_bus_tag, ofw_pci_bus_get_bus_tag), +#endif + + /* pcib interface */ + DEVMETHOD(pcib_maxslots, ofw_pci_maxslots), + DEVMETHOD(pcib_route_interrupt, ofw_pci_route_interrupt), + + /* ofw_bus interface */ + DEVMETHOD(ofw_bus_get_node, ofw_pci_get_node), + + DEVMETHOD_END +}; + +DEFINE_CLASS_0(ofw_pci, ofw_pci_driver, ofw_pci_methods, 0); + +int +ofw_pci_init(device_t dev) +{ + struct ofw_pci_softc *sc; + phandle_t node; + u_int32_t busrange[2]; + struct ofw_pci_range 
*rp; + int error; + struct ofw_pci_cell_info *cell_info; + + node = ofw_bus_get_node(dev); + sc = device_get_softc(dev); + sc->sc_initialized = 1; + sc->sc_range = NULL; + + cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info), + M_DEVBUF, M_WAITOK | M_ZERO); + + sc->sc_cell_info = cell_info; + + if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8) + busrange[0] = 0; + + sc->sc_dev = dev; + sc->sc_node = node; + sc->sc_bus = busrange[0]; + + if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { + phandle_t c; + int n, i; + + sc->sc_nrange = 0; + for (c = OF_child(node); c != 0; c = OF_peer(c)) { + n = ofw_pci_nranges(c, cell_info); + if (n > 0) + sc->sc_nrange += n; + } + if (sc->sc_nrange == 0) { + error = ENXIO; + goto out; + } + sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), + M_DEVBUF, M_WAITOK); + i = 0; + for (c = OF_child(node); c != 0; c = OF_peer(c)) { + n = ofw_pci_fill_ranges(c, &sc->sc_range[i]); + if (n > 0) + i += n; + } + KASSERT(i == sc->sc_nrange, ("range count mismatch")); + } else { + sc->sc_nrange = ofw_pci_nranges(node, cell_info); + if (sc->sc_nrange <= 0) { + device_printf(dev, "could not getranges\n"); + error = ENXIO; + goto out; + } + sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), + M_DEVBUF, M_WAITOK); + ofw_pci_fill_ranges(node, sc->sc_range); + } + + sc->sc_io_rman.rm_type = RMAN_ARRAY; + sc->sc_io_rman.rm_descr = "PCI I/O Ports"; + error = rman_init(&sc->sc_io_rman); + if (error) { + device_printf(dev, "rman_init() failed. error = %d\n", error); + goto out; + } + + sc->sc_mem_rman.rm_type = RMAN_ARRAY; + sc->sc_mem_rman.rm_descr = "PCI Memory"; + error = rman_init(&sc->sc_mem_rman); + if (error) { + device_printf(dev, "rman_init() failed. 
error = %d\n", error); + goto out; + } + + for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && + rp->pci_hi != 0; rp++) { + error = 0; + + switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { + case OFW_PCI_PHYS_HI_SPACE_CONFIG: + break; + case OFW_PCI_PHYS_HI_SPACE_IO: + error = rman_manage_region(&sc->sc_io_rman, rp->pci, + rp->pci + rp->size - 1); + break; + case OFW_PCI_PHYS_HI_SPACE_MEM32: + case OFW_PCI_PHYS_HI_SPACE_MEM64: + error = rman_manage_region(&sc->sc_mem_rman, rp->pci, + rp->pci + rp->size - 1); + break; + } + + if (error) { + device_printf(dev, + "rman_manage_region(%x, %#jx, %#jx) failed. " + "error = %d\n", rp->pci_hi & + OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, + rp->pci + rp->size - 1, error); + goto out; + } + } + + ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); + +out: + free(cell_info, M_DEVBUF); + free(sc->sc_range, M_DEVBUF); + rman_fini(&sc->sc_io_rman); + rman_fini(&sc->sc_mem_rman); + + return (error); +} + +int +ofw_pci_attach(device_t dev) +{ + struct ofw_pci_softc *sc; + int error; + + sc = device_get_softc(dev); + if (!sc->sc_initialized) { + error = ofw_pci_init(dev); + if (error) + return (error); + } + + device_add_child(dev, "pci", -1); + return (bus_generic_attach(dev)); +} + +static int +ofw_pci_maxslots(device_t dev) +{ + + return (PCI_SLOTMAX); +} + +int +ofw_pci_route_interrupt(device_t bus, device_t dev, int pin) +{ + struct ofw_pci_softc *sc; + struct ofw_pci_register reg; + uint32_t pintr, mintr[PCI_MAP_INTR]; + int intrcells; + phandle_t iparent; + + sc = device_get_softc(bus); + pintr = pin; + + /* Fabricate imap information in case this isn't an OFW device */ + bzero(®, sizeof(reg)); + reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | + (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | + (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); + + intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), + &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), + mintr, sizeof(mintr), 
&iparent); + if (intrcells != 0) { + pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); + return (pintr); + } + + /* + * Maybe it's a real interrupt, not an intpin + */ + if (pin > PCI_INTR_PINS) + return (pin); + + device_printf(bus, "could not route pin %d for device %d.%d\n", + pin, pci_get_slot(dev), pci_get_function(dev)); + return (PCI_INVALID_IRQ); +} + +int +ofw_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) +{ + struct ofw_pci_softc *sc; + + sc = device_get_softc(dev); + + switch (which) { + case PCIB_IVAR_DOMAIN: + *result = device_get_unit(dev); + return (0); + case PCIB_IVAR_BUS: + *result = sc->sc_bus; + return (0); + default: + break; + } + + return (ENOENT); +} + +int +ofw_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) +{ + struct ofw_pci_softc *sc; + + sc = device_get_softc(dev); + + switch (which) { + case PCIB_IVAR_BUS: + sc->sc_bus = value; + return (0); + default: + break; + } + + return (ENOENT); +} + +int +ofw_pci_nranges(phandle_t node, struct ofw_pci_cell_info *info) +{ + ssize_t nbase_ranges; + + if (info == NULL) + return (-1); + + info->host_address_cells = 1; + info->size_cells = 2; + info->pci_address_cell = 3; + + OF_getencprop(OF_parent(node), "#address-cells", + &(info->host_address_cells), sizeof(info->host_address_cells)); + OF_getencprop(node, "#address-cells", + &(info->pci_address_cell), sizeof(info->pci_address_cell)); + OF_getencprop(node, "#size-cells", &(info->size_cells), + sizeof(info->size_cells)); + + nbase_ranges = OF_getproplen(node, "ranges"); + if (nbase_ranges <= 0) + return (-1); + + return (nbase_ranges / sizeof(cell_t) / + (info->pci_address_cell + info->host_address_cells + + info->size_cells)); +} + +static struct resource * +ofw_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, + rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) +{ + struct ofw_pci_softc *sc; + struct resource *rv; + struct rman *rm; + int 
needactivate; + + needactivate = flags & RF_ACTIVE; + flags &= ~RF_ACTIVE; + + sc = device_get_softc(bus); + + switch (type) { + case SYS_RES_MEMORY: + rm = &sc->sc_mem_rman; + break; + + case SYS_RES_IOPORT: + rm = &sc->sc_io_rman; + break; + + case SYS_RES_IRQ: + return (bus_alloc_resource(bus, type, rid, start, end, count, + flags)); + + default: + device_printf(bus, "unknown resource request from %s\n", + device_get_nameunit(child)); + return (NULL); + } + + rv = rman_reserve_resource(rm, start, end, count, flags, child); + if (rv == NULL) { + device_printf(bus, "failed to reserve resource for %s\n", + device_get_nameunit(child)); + return (NULL); + } + + rman_set_rid(rv, *rid); + + if (needactivate) { + if (bus_activate_resource(child, type, *rid, rv) != 0) { + device_printf(bus, + "failed to activate resource for %s\n", + device_get_nameunit(child)); + rman_release_resource(rv); + return (NULL); + } + } + + return (rv); +} + +static int +ofw_pci_release_resource(device_t bus, device_t child, int type, int rid, + struct resource *res) +{ + + if (rman_get_flags(res) & RF_ACTIVE) { + int error = bus_deactivate_resource(child, type, rid, res); + if (error) + return error; + } + + return (rman_release_resource(res)); +} + +static int +ofw_pci_activate_resource(device_t bus, device_t child, int type, int rid, + struct resource *res) +{ + struct ofw_pci_softc *sc; + bus_space_handle_t handle; + bus_space_tag_t tag; + int rv; + + sc = device_get_softc(bus); + + if (type == SYS_RES_IRQ) { + return (bus_activate_resource(bus, type, rid, res)); + } + if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { + struct ofw_pci_range *rp; + vm_offset_t start; + int space; + + start = (vm_offset_t)rman_get_start(res); + + /* + * Map this through the ranges list + */ + for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && + rp->pci_hi != 0; rp++) { + if (start < rp->pci || start >= rp->pci + rp->size) + continue; + + switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { + 
case OFW_PCI_PHYS_HI_SPACE_IO: + space = SYS_RES_IOPORT; + break; + case OFW_PCI_PHYS_HI_SPACE_MEM32: + case OFW_PCI_PHYS_HI_SPACE_MEM64: + space = SYS_RES_MEMORY; + break; + default: + space = -1; + } + + if (type == space) { + start += (rp->host - rp->pci); + break; + } + } + + if (bootverbose) + printf("ofw_pci mapdev: start %zx, len %ld\n", start, + rman_get_size(res)); + + tag = BUS_GET_BUS_TAG(child, child); + if (tag == NULL) + return (ENOMEM); + + rman_set_bustag(res, tag); + rv = bus_space_map(tag, start, + rman_get_size(res), 0, &handle); + if (rv != 0) + return (ENOMEM); + + rman_set_bushandle(res, handle); + rman_set_virtual(res, (void *)handle); /* XXX for powerpc only ? */ + } + + return (rman_activate_resource(res)); +} + +#ifdef __powerpc__ +static bus_space_tag_t +ofw_pci_bus_get_bus_tag(device_t bus, device_t child) +{ + + return (&bs_le_tag) +} +#endif + +static int +ofw_pci_deactivate_resource(device_t bus, device_t child, int type, int rid, + struct resource *res) +{ + + /* + * If this is a memory resource, unmap it. 
+ */ + if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { + u_int32_t psize; + + psize = rman_get_size(res); + pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); + } + + return (rman_deactivate_resource(res)); +} + +static int +ofw_pci_adjust_resource(device_t bus, device_t child, int type, + struct resource *res, rman_res_t start, rman_res_t end) +{ + struct rman *rm = NULL; + struct ofw_pci_softc *sc = device_get_softc(bus); + + KASSERT(!(rman_get_flags(res) & RF_ACTIVE), + ("active resources cannot be adjusted")); + if (rman_get_flags(res) & RF_ACTIVE) + return (EINVAL); + + switch (type) { + case SYS_RES_MEMORY: + rm = &sc->sc_mem_rman; + break; + case SYS_RES_IOPORT: + rm = &sc->sc_io_rman; + break; + default: + return (ENXIO); + } + + if (!rman_is_region_manager(res, rm)) + return (EINVAL); + + return (rman_adjust_resource(res, start, end)); +} + +static phandle_t +ofw_pci_get_node(device_t bus, device_t dev) +{ + struct ofw_pci_softc *sc; + + sc = device_get_softc(bus); + /* We only have one child, the PCI bus, which needs our own node. 
*/ + + return (sc->sc_node); +} + +static int +ofw_pci_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) +{ + int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; + cell_t *base_ranges; + ssize_t nbase_ranges; + int nranges; + int i, j, k; + + OF_getencprop(OF_parent(node), "#address-cells", &host_address_cells, + sizeof(host_address_cells)); + OF_getencprop(node, "#address-cells", &pci_address_cells, + sizeof(pci_address_cells)); + OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); + + nbase_ranges = OF_getproplen(node, "ranges"); + if (nbase_ranges <= 0) + return (-1); + nranges = nbase_ranges / sizeof(cell_t) / + (pci_address_cells + host_address_cells + size_cells); + + base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); + OF_getencprop(node, "ranges", base_ranges, nbase_ranges); + + for (i = 0, j = 0; i < nranges; i++) { + ranges[i].pci_hi = base_ranges[j++]; + ranges[i].pci = 0; + for (k = 0; k < pci_address_cells - 1; k++) { + ranges[i].pci <<= 32; + ranges[i].pci |= base_ranges[j++]; + } + ranges[i].host = 0; + for (k = 0; k < host_address_cells; k++) { + ranges[i].host <<= 32; + ranges[i].host |= base_ranges[j++]; + } + ranges[i].size = 0; + for (k = 0; k < size_cells; k++) { + ranges[i].size <<= 32; + ranges[i].size |= base_ranges[j++]; + } + } + + free(base_ranges, M_DEVBUF); + return (nranges); +} diff --git a/sys/dev/ofw/ofw_pci.h b/sys/dev/ofw/ofw_pci.h index eb60c5baee0b..424b5f13ca33 100644 --- a/sys/dev/ofw/ofw_pci.h +++ b/sys/dev/ofw/ofw_pci.h @@ -82,13 +82,18 @@ #define OFW_PCI_PHYS_HI_SPACE_MEM32 0x02000000 #define OFW_PCI_PHYS_HI_SPACE_MEM64 0x03000000 -#define OFW_PCI_PHYS_HI_BUS(hi) \ +#define OFW_PCI_PHYS_HI_BUS(hi) \ (((hi) & OFW_PCI_PHYS_HI_BUSMASK) >> OFW_PCI_PHYS_HI_BUSSHIFT) -#define OFW_PCI_PHYS_HI_DEVICE(hi) \ +#define OFW_PCI_PHYS_HI_DEVICE(hi) \ (((hi) & OFW_PCI_PHYS_HI_DEVICEMASK) >> OFW_PCI_PHYS_HI_DEVICESHIFT) -#define OFW_PCI_PHYS_HI_FUNCTION(hi) \ +#define OFW_PCI_PHYS_HI_FUNCTION(hi) 
\ (((hi) & OFW_PCI_PHYS_HI_FUNCTIONMASK) >> OFW_PCI_PHYS_HI_FUNCTIONSHIFT) +/* + * Export class definition for inheritance purposes + */ +DECLARE_CLASS(ofw_pci_driver); + /* * This has the 3 32bit cell values, plus 2 more to make up a 64-bit size. */ @@ -100,4 +105,50 @@ struct ofw_pci_register { u_int32_t size_lo; }; +struct ofw_pci_cell_info { + pcell_t host_address_cells; + pcell_t pci_address_cell; + pcell_t size_cells; + }; + +struct ofw_pci_range { + uint32_t pci_hi; + uint64_t pci; + uint64_t host; + uint64_t size; +}; + +/* + * Quirks for some adapters + */ +enum { + OFW_PCI_QUIRK_RANGES_ON_CHILDREN = 1, +}; + +struct ofw_pci_softc { + device_t sc_dev; + phandle_t sc_node; + int sc_bus; + int sc_initialized; + int sc_quirks; + + struct ofw_pci_range *sc_range; + int sc_nrange; + struct ofw_pci_cell_info *sc_cell_info; + + struct rman sc_io_rman; + struct rman sc_mem_rman; + bus_space_tag_t sc_memt; + bus_dma_tag_t sc_dmat; + + struct ofw_bus_iinfo sc_pci_iinfo; +}; + +int ofw_pci_init(device_t); +int ofw_pci_attach(device_t); +int ofw_pci_read_ivar(device_t, device_t, int, uintptr_t *); +int ofw_pci_write_ivar(device_t, device_t, int, uintptr_t); +int ofw_pci_route_interrupt(device_t, device_t, int); +int ofw_pci_nranges(phandle_t, struct ofw_pci_cell_info *); + #endif /* _DEV_OFW_OFW_PCI_H_ */ diff --git a/sys/dev/ofw/ofw_subr.c b/sys/dev/ofw/ofw_subr.c index c9b99bfb38f0..be31ccbc0eb4 100644 --- a/sys/dev/ofw/ofw_subr.c +++ b/sys/dev/ofw/ofw_subr.c @@ -38,8 +38,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include +#include static void get_addr_props(phandle_t node, uint32_t *addrp, uint32_t *sizep, int *pcip) diff --git a/sys/powerpc/mpc85xx/pci_mpc85xx.c b/sys/powerpc/mpc85xx/pci_mpc85xx.c index 4397ac0ad778..de55afc4dba2 100644 --- a/sys/powerpc/mpc85xx/pci_mpc85xx.c +++ b/sys/powerpc/mpc85xx/pci_mpc85xx.c @@ -55,15 +55,13 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include +#include #include #include #include 
-#include - #include "ofw_bus_if.h" #include "pcib_if.h" diff --git a/sys/powerpc/powermac/cpcht.c b/sys/powerpc/powermac/cpcht.c index 765d94624cd0..737e872801f3 100644 --- a/sys/powerpc/powermac/cpcht.c +++ b/sys/powerpc/powermac/cpcht.c @@ -36,7 +36,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include @@ -51,7 +50,7 @@ __FBSDID("$FreeBSD$"); #include #include -#include +#include #include #include diff --git a/sys/powerpc/powermac/grackle.c b/sys/powerpc/powermac/grackle.c index 95d59a1ca744..f0928f3a8a74 100644 --- a/sys/powerpc/powermac/grackle.c +++ b/sys/powerpc/powermac/grackle.c @@ -37,9 +37,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include +#include #include #include @@ -52,7 +52,6 @@ __FBSDID("$FreeBSD$"); #include -#include #include #include diff --git a/sys/powerpc/powermac/uninorthpci.c b/sys/powerpc/powermac/uninorthpci.c index 9da06ffe10de..5cb21c1bb13a 100644 --- a/sys/powerpc/powermac/uninorthpci.c +++ b/sys/powerpc/powermac/uninorthpci.c @@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include +#include #include #include @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include -#include #include #include diff --git a/sys/powerpc/powermac/uninorthvar.h b/sys/powerpc/powermac/uninorthvar.h index e08478d7580b..efe169c18c3d 100644 --- a/sys/powerpc/powermac/uninorthvar.h +++ b/sys/powerpc/powermac/uninorthvar.h @@ -30,7 +30,6 @@ #include #include -#include struct uninorth_softc { struct ofw_pci_softc pci_sc; diff --git a/sys/powerpc/pseries/rtas_pci.c b/sys/powerpc/pseries/rtas_pci.c index bb72b710e7b5..1348fc8992dc 100644 --- a/sys/powerpc/pseries/rtas_pci.c +++ b/sys/powerpc/pseries/rtas_pci.c @@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include +#include #include #include @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include "pcib_if.h" -- cgit v1.2.3 From 796a99fc5c91e6a9d476e07e7db4a99c8842e152 Mon Sep 17 00:00:00 
2001 From: Ed Maste Date: Thu, 18 Feb 2016 14:17:28 +0000 Subject: Remove dd xfer stats emitted during buildworld They result in gratuitous differences when comparing build log output. --- lib/libc/tests/gen/posix_spawn/Makefile | 2 +- lib/libc/tests/sys/Makefile | 2 +- sys/boot/efi/boot1/Makefile | 3 ++- sys/boot/i386/boot2/Makefile | 4 ++-- sys/boot/i386/pxeldr/Makefile | 2 +- sys/boot/i386/zfsboot/Makefile | 2 +- 6 files changed, 8 insertions(+), 7 deletions(-) diff --git a/lib/libc/tests/gen/posix_spawn/Makefile b/lib/libc/tests/gen/posix_spawn/Makefile index 9bb2cf1cbd57..9b687c6d5fed 100644 --- a/lib/libc/tests/gen/posix_spawn/Makefile +++ b/lib/libc/tests/gen/posix_spawn/Makefile @@ -20,7 +20,7 @@ CLEANFILES+= h_nonexec .include "../../Makefile.netbsd-tests" h_zero: - dd if=/dev/zero of=h_zero bs=1k count=2 + dd if=/dev/zero of=h_zero bs=1k count=2 status=none chmod a+x h_zero CLEANFILES+= h_zero diff --git a/lib/libc/tests/sys/Makefile b/lib/libc/tests/sys/Makefile index efc892ceb6cc..c7b005301527 100644 --- a/lib/libc/tests/sys/Makefile +++ b/lib/libc/tests/sys/Makefile @@ -78,6 +78,6 @@ truncate_test_FILESGRP= wheel CLEANFILES= truncate_test.root_owned truncate_test.root_owned: - dd if=/dev/null bs=1 count=1 of=${.TARGET} + dd if=/dev/null bs=1 count=1 of=${.TARGET} status=none .include diff --git a/sys/boot/efi/boot1/Makefile b/sys/boot/efi/boot1/Makefile index 7c983e3a4171..5455e1a369ed 100644 --- a/sys/boot/efi/boot1/Makefile +++ b/sys/boot/efi/boot1/Makefile @@ -113,7 +113,8 @@ boot1.efifat: boot1.efi uudecode ${.CURDIR}/fat-${MACHINE}.tmpl.bz2.uu mv fat-${MACHINE}.tmpl.bz2 ${.TARGET}.bz2 bzip2 -f -d ${.TARGET}.bz2 - dd if=boot1.efi of=${.TARGET} seek=${BOOT1_OFFSET} conv=notrunc + dd if=boot1.efi of=${.TARGET} seek=${BOOT1_OFFSET} conv=notrunc \ + status=none CLEANFILES= boot1.efi boot1.efifat diff --git a/sys/boot/i386/boot2/Makefile b/sys/boot/i386/boot2/Makefile index 25de8c4bed9e..195206f1c65b 100644 --- a/sys/boot/i386/boot2/Makefile +++ 
b/sys/boot/i386/boot2/Makefile @@ -72,14 +72,14 @@ CLEANFILES+= boot2 boot2.ld boot2.ldr boot2.bin boot2.out boot2.o \ boot2: boot2.ld @set -- `ls -l boot2.ld`; x=$$((7680-$$5)); \ echo "$$x bytes available"; test $$x -ge 0 - dd if=boot2.ld of=${.TARGET} obs=7680 conv=osync + dd if=boot2.ld of=${.TARGET} obs=7680 conv=osync status=none boot2.ld: boot2.ldr boot2.bin ${BTXKERN} btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l boot2.ldr \ -o ${.TARGET} -P 1 boot2.bin boot2.ldr: - dd if=/dev/zero of=${.TARGET} bs=512 count=1 + dd if=/dev/zero of=${.TARGET} bs=512 count=1 status=none boot2.bin: boot2.out ${OBJCOPY} -S -O binary boot2.out ${.TARGET} diff --git a/sys/boot/i386/pxeldr/Makefile b/sys/boot/i386/pxeldr/Makefile index af3a436b5d7a..c4e008fed021 100644 --- a/sys/boot/i386/pxeldr/Makefile +++ b/sys/boot/i386/pxeldr/Makefile @@ -31,7 +31,7 @@ CLEANFILES+= ${BOOT}.tmp ${BOOT}: ${LDR} ${LOADER} cat ${LDR} ${LOADER} > ${.TARGET}.tmp - dd if=${.TARGET}.tmp of=${.TARGET} obs=2k conv=osync + dd if=${.TARGET}.tmp of=${.TARGET} obs=2k conv=osync status=none rm ${.TARGET}.tmp LDFLAGS+=-e start -Ttext ${ORG} -Wl,-N,-S,--oformat,binary diff --git a/sys/boot/i386/zfsboot/Makefile b/sys/boot/i386/zfsboot/Makefile index 8c94996c569a..5ecf13c25d5e 100644 --- a/sys/boot/i386/zfsboot/Makefile +++ b/sys/boot/i386/zfsboot/Makefile @@ -65,7 +65,7 @@ BOOT2SIZE= 65536 zfsboot2: zfsboot.ld @set -- `ls -l zfsboot.ld`; x=$$((${BOOT2SIZE}-$$5)); \ echo "$$x bytes available"; test $$x -ge 0 - dd if=zfsboot.ld of=${.TARGET} obs=${BOOT2SIZE} conv=osync + dd if=zfsboot.ld of=${.TARGET} obs=${BOOT2SIZE} conv=osync status=none zfsboot.ld: zfsboot.ldr zfsboot.bin ${BTXKERN} btxld -v -E ${ORG2} -f bin -b ${BTXKERN} -l zfsboot.ldr \ -- cgit v1.2.3 From 2a9aa1a6461eb350425707931e762768994e51ff Mon Sep 17 00:00:00 2001 From: Ruslan Bukin Date: Thu, 18 Feb 2016 14:38:37 +0000 Subject: Use medany (Medium/Anywhere) GCC code model for RISC-V. 
This will allow us to use bigger relocations and all the 64-bit VA space. --- sys/conf/kern.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/sys/conf/kern.mk b/sys/conf/kern.mk index fb72a9789482..8b88d612ede3 100644 --- a/sys/conf/kern.mk +++ b/sys/conf/kern.mk @@ -105,6 +105,7 @@ CFLAGS += -ffixed-x18 .endif .if ${MACHINE_CPUARCH} == "riscv" +CFLAGS.gcc+= -mcmodel=medany INLINE_LIMIT?= 8000 .endif -- cgit v1.2.3 From 787db28adf2ab740c0627ab4f789a5224bf984ad Mon Sep 17 00:00:00 2001 From: Warner Losh Date: Thu, 18 Feb 2016 15:12:52 +0000 Subject: Remove a stray else. It isn't needed (due to the return at the end of the if statement it pairs with). While not an error today, a careless edit in the future could cause problems (though given the nature of this specific code, the problems quite likely would be some variation of "most direct access SCSI storage devices won't attach," which is unlikely to go unnoticed). PVS-Studio: V705 --- sys/cam/scsi/scsi_da.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sys/cam/scsi/scsi_da.c b/sys/cam/scsi/scsi_da.c index 0a7f62754523..9d391829f520 100644 --- a/sys/cam/scsi/scsi_da.c +++ b/sys/cam/scsi/scsi_da.c @@ -3228,7 +3228,8 @@ dadone(struct cam_periph *periph, union ccb *done_ccb) softc->state = DA_STATE_PROBE_RC; xpt_schedule(periph, priority); return; - } else + } + /* * Attach to anything that claims to be a * direct access or optical disk device, -- cgit v1.2.3 From a4ed9e4f78048517a2f31aab3c4a425bd609b66a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Thu, 18 Feb 2016 15:23:25 +0000 Subject: Make WARNS=6 safe. Tested with Clang 3.7.1, GCC 4.2.1 and GCC 4.8.5 on amd64. 
--- usr.sbin/pciconf/Makefile | 2 -- usr.sbin/pciconf/cap.c | 10 +++++++++- usr.sbin/pciconf/pciconf.c | 17 ++++++++++------- 3 files changed, 19 insertions(+), 10 deletions(-) diff --git a/usr.sbin/pciconf/Makefile b/usr.sbin/pciconf/Makefile index a83973354523..e288654a4d85 100644 --- a/usr.sbin/pciconf/Makefile +++ b/usr.sbin/pciconf/Makefile @@ -5,6 +5,4 @@ PROG= pciconf SRCS= pciconf.c cap.c err.c MAN= pciconf.8 -WARNS?= 3 - .include diff --git a/usr.sbin/pciconf/cap.c b/usr.sbin/pciconf/cap.c index 9ab6dd17ddbd..25132cd2da8a 100644 --- a/usr.sbin/pciconf/cap.c +++ b/usr.sbin/pciconf/cap.c @@ -120,6 +120,9 @@ static void cap_vpd(int fd, struct pci_conf *p, uint8_t ptr) { + (void)fd; /* UNUSED */ + (void)p; /* UNUSED */ + (void)ptr; /* UNUSED */ printf("VPD"); } @@ -172,6 +175,7 @@ cap_pcix(int fd, struct pci_conf *p, uint8_t ptr) } if ((p->pc_hdr & PCIM_HDRTYPE) == 1) return; + max_burst_read = 0; switch (status & PCIXM_STATUS_MAX_READ) { case PCIXM_STATUS_MAX_READ_512: max_burst_read = 512; @@ -186,6 +190,7 @@ cap_pcix(int fd, struct pci_conf *p, uint8_t ptr) max_burst_read = 4096; break; } + max_splits = 0; switch (status & PCIXM_STATUS_MAX_SPLITS) { case PCIXM_STATUS_MAX_SPLITS_1: max_splits = 1; @@ -518,6 +523,9 @@ static void cap_sata(int fd, struct pci_conf *p, uint8_t ptr) { + (void)fd; /* UNUSED */ + (void)p; /* UNUSED */ + (void)ptr; /* UNUSED */ printf("SATA Index-Data Pair"); } @@ -759,7 +767,7 @@ ecap_sriov(int fd, struct pci_conf *p, uint16_t ptr, uint8_t ver) print_bar(fd, p, "iov bar ", ptr + PCIR_SRIOV_BAR(i)); } -struct { +static struct { uint16_t id; const char *name; } ecap_names[] = { diff --git a/usr.sbin/pciconf/pciconf.c b/usr.sbin/pciconf/pciconf.c index 194da6b3d9e8..85b5e870fed7 100644 --- a/usr.sbin/pciconf/pciconf.c +++ b/usr.sbin/pciconf/pciconf.c @@ -67,7 +67,7 @@ struct pci_vendor_info char *desc; }; -TAILQ_HEAD(,pci_vendor_info) pci_vendors; +static TAILQ_HEAD(,pci_vendor_info) pci_vendors; static struct pcisel getsel(const char 
*str); static void list_bridge(int fd, struct pci_conf *p); @@ -896,16 +896,18 @@ getdevice(const char *name) static struct pcisel parsesel(const char *str) { - char *ep = strchr(str, '@'); - char *epbase; + const char *ep; + const char *epbase; + char *eppos; struct pcisel sel; unsigned long selarr[4]; int i; - if (ep == NULL) - ep = (char *)str; - else + ep = strchr(str, '@'); + if (ep != NULL) ep++; + else + ep = str; epbase = ep; @@ -913,7 +915,8 @@ parsesel(const char *str) ep += 3; i = 0; do { - selarr[i++] = strtoul(ep, &ep, 10); + selarr[i++] = strtoul(ep, &eppos, 10); + ep = eppos; } while ((*ep == ':' || *ep == '.') && *++ep != '\0' && i < 4); if (i > 2) -- cgit v1.2.3 From 229f3f0d9caf191a7acbe1aa4cb7241e02c2eeba Mon Sep 17 00:00:00 2001 From: Ruslan Bukin Date: Thu, 18 Feb 2016 15:28:57 +0000 Subject: Increase kernel and user VA space. This allows us to boot with more than 128MB of physical memory. Sponsored by: DARPA, AFRL Sponsored by: HEIF5 --- sys/riscv/include/vmparam.h | 37 ++++++++++++++++++++----------------- sys/riscv/riscv/locore.S | 1 - sys/riscv/riscv/pmap.c | 28 ++++++++++++++-------------- 3 files changed, 34 insertions(+), 32 deletions(-) diff --git a/sys/riscv/include/vmparam.h b/sys/riscv/include/vmparam.h index 08d3c3bd1bea..4f2761561fba 100644 --- a/sys/riscv/include/vmparam.h +++ b/sys/riscv/include/vmparam.h @@ -43,19 +43,19 @@ * Virtual memory related constants, all in bytes */ #ifndef MAXTSIZ -#define MAXTSIZ (32*1024*1024) /* max text size */ +#define MAXTSIZ (1*1024*1024*1024) /* max text size */ #endif #ifndef DFLDSIZ #define DFLDSIZ (128*1024*1024) /* initial data size limit */ #endif #ifndef MAXDSIZ -#define MAXDSIZ (128*1024*1024) /* max data size */ +#define MAXDSIZ (1*1024*1024*1024) /* max data size */ #endif #ifndef DFLSSIZ -#define DFLSSIZ (2*1024*1024) /* initial stack size limit */ +#define DFLSSIZ (128*1024*1024) /* initial stack size limit */ #endif #ifndef MAXSSIZ -#define MAXSSIZ (8*1024*1024) /* max stack size */ 
+#define MAXSSIZ (1*1024*1024*1024) /* max stack size */ #endif #ifndef SGROWSIZ #define SGROWSIZ (128*1024) /* amount to grow stack */ @@ -128,12 +128,12 @@ * We limit the size of the two spaces to 39 bits each. * * Upper region: 0xffffffffffffffff - * 0xffffffffc0000000 + * 0xffffff8000000000 * - * Hole: 0xffffffffbfffffff - * 0x0000000080000000 + * Hole: 0xffffff7fffffffff + * 0x0000008000000000 * - * Lower region: 0x000000007fffffff + * Lower region: 0x0000007fffffffff * 0x0000000000000000 * * We use the upper region for the kernel, and the lower region for userland. @@ -152,19 +152,20 @@ #define VM_MIN_ADDRESS (0x0000000000000000UL) #define VM_MAX_ADDRESS (0xffffffffffffffffUL) -/* 256 MiB of kernel addresses */ -#define VM_MIN_KERNEL_ADDRESS (0xffffffffc0000000UL) -#define VM_MAX_KERNEL_ADDRESS (0xffffffffcfffffffUL) +/* 32 GiB of kernel addresses */ +#define VM_MIN_KERNEL_ADDRESS (0xffffff8000000000UL) +#define VM_MAX_KERNEL_ADDRESS (0xffffff8800000000UL) -/* Direct Map for 512 MiB of PA: 0x0 - 0x1fffffff */ -#define DMAP_MIN_ADDRESS (0xffffffffd0000000UL) -#define DMAP_MAX_ADDRESS (0xffffffffefffffffUL) +/* Direct Map for 128 GiB of PA: 0x0 - 0x1fffffffff */ +#define DMAP_MIN_ADDRESS (0xffffffc000000000UL) +#define DMAP_MAX_ADDRESS (0xffffffdfffffffffUL) #define DMAP_MIN_PHYSADDR (0x0000000000000000UL) #define DMAP_MAX_PHYSADDR (DMAP_MAX_ADDRESS - DMAP_MIN_ADDRESS) /* True if pa is in the dmap range */ -#define PHYS_IN_DMAP(pa) ((pa) <= DMAP_MAX_PHYSADDR) +#define PHYS_IN_DMAP(pa) ((pa) >= DMAP_MIN_PHYSADDR && \ + (pa) <= DMAP_MAX_PHYSADDR) /* True if va is in the dmap range */ #define VIRT_IN_DMAP(va) ((va) >= DMAP_MIN_ADDRESS && \ (va) <= DMAP_MAX_ADDRESS) @@ -186,13 +187,15 @@ }) #define VM_MIN_USER_ADDRESS (0x0000000000000000UL) -#define VM_MAX_USER_ADDRESS (0x0000000080000000UL) +#define VM_MAX_USER_ADDRESS (0x0000008000000000UL) #define VM_MINUSER_ADDRESS (VM_MIN_USER_ADDRESS) #define VM_MAXUSER_ADDRESS (VM_MAX_USER_ADDRESS) #define KERNBASE 
(VM_MIN_KERNEL_ADDRESS) -#define USRSTACK (VM_MAX_USER_ADDRESS) +#define SHAREDPAGE (VM_MAXUSER_ADDRESS - PAGE_SIZE) +#define USRSTACK SHAREDPAGE + #define KERNENTRY (0x200) /* diff --git a/sys/riscv/riscv/locore.S b/sys/riscv/riscv/locore.S index 8dc424e7fc4e..0744167c011f 100644 --- a/sys/riscv/riscv/locore.S +++ b/sys/riscv/riscv/locore.S @@ -102,7 +102,6 @@ _start: /* finish building ring */ la t0, hardstack_end - sub t0, t0, s11 csrw mscratch, t0 la t0, mentry diff --git a/sys/riscv/riscv/pmap.c b/sys/riscv/riscv/pmap.c index e4ca19fff330..ef25b16f3c35 100644 --- a/sys/riscv/riscv/pmap.c +++ b/sys/riscv/riscv/pmap.c @@ -445,31 +445,33 @@ pmap_early_vtophys(vm_offset_t l1pt, vm_offset_t va) } static void -pmap_bootstrap_dmap(vm_offset_t l2pt) +pmap_bootstrap_dmap(vm_offset_t l1pt, vm_paddr_t kernstart) { vm_offset_t va; vm_paddr_t pa; - pd_entry_t *l2; - u_int l2_slot; + pd_entry_t *l1; + u_int l1_slot; pt_entry_t entry; u_int pn; + pa = kernstart & ~L1_OFFSET; va = DMAP_MIN_ADDRESS; - l2 = (pd_entry_t *)l2pt; - l2_slot = pmap_l2_index(DMAP_MIN_ADDRESS); + l1 = (pd_entry_t *)l1pt; + l1_slot = pmap_l1_index(DMAP_MIN_ADDRESS); - for (pa = 0; va < DMAP_MAX_ADDRESS; pa += L2_SIZE, va += L2_SIZE, l2_slot++) { - KASSERT(l2_slot < Ln_ENTRIES, ("Invalid L2 index")); + for (; va < DMAP_MAX_ADDRESS; + pa += L1_SIZE, va += L1_SIZE, l1_slot++) { + KASSERT(l1_slot < Ln_ENTRIES, ("Invalid L1 index")); /* superpages */ - pn = ((pa >> L2_SHIFT) & Ln_ADDR_MASK); + pn = ((pa >> L1_SHIFT) & Ln_ADDR_MASK); entry = (PTE_VALID | (PTE_TYPE_SRWX << PTE_TYPE_S)); - entry |= (pn << PTE_PPN1_S); + entry |= (pn << PTE_PPN2_S); - pmap_load_store(&l2[l2_slot], entry); + pmap_load_store(&l1[l1_slot], entry); } - cpu_dcache_wb_range((vm_offset_t)l2, PAGE_SIZE); + cpu_dcache_wb_range((vm_offset_t)l1, PAGE_SIZE); cpu_tlb_flushID(); } @@ -485,7 +487,6 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) vm_offset_t va, freemempos; vm_offset_t dpcpu, msgbufpv; vm_paddr_t 
pa, min_pa; - vm_offset_t l2pt; int i; kern_delta = KERNBASE - kernstart; @@ -520,8 +521,7 @@ pmap_bootstrap(vm_offset_t l1pt, vm_paddr_t kernstart, vm_size_t kernlen) } /* Create a direct map region early so we can use it for pa -> va */ - l2pt = (l1pt + PAGE_SIZE); - pmap_bootstrap_dmap(l2pt); + pmap_bootstrap_dmap(l1pt, min_pa); va = KERNBASE; pa = KERNBASE - kern_delta; -- cgit v1.2.3 From 910905c74fc03616577797cfd5d69e1a57256162 Mon Sep 17 00:00:00 2001 From: Zbigniew Bodek Date: Thu, 18 Feb 2016 15:44:45 +0000 Subject: Fix build for i386 and arm64 after r295755 - Take bus_space_tag_t type into consideration when returning default, zero value. - Include missing rman.h required by ofw_pci.h --- sys/dev/ofw/ofw_subr.c | 1 + sys/kern/subr_bus.c | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/sys/dev/ofw/ofw_subr.c b/sys/dev/ofw/ofw_subr.c index be31ccbc0eb4..e9b66c284bca 100644 --- a/sys/dev/ofw/ofw_subr.c +++ b/sys/dev/ofw/ofw_subr.c @@ -34,6 +34,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 3e4568b58e66..22cddea6ed74 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -4107,7 +4107,7 @@ bus_generic_get_bus_tag(device_t dev, device_t child) /* Propagate up the bus hierarchy until someone handles it. */ if (dev->parent != NULL) return (BUS_GET_BUS_TAG(dev->parent, child)); - return (NULL); + return ((bus_space_tag_t)0); } /** @@ -4604,7 +4604,7 @@ bus_get_bus_tag(device_t dev) parent = device_get_parent(dev); if (parent == NULL) - return (NULL); + return ((bus_space_tag_t)0); return (BUS_GET_BUS_TAG(parent, dev)); } -- cgit v1.2.3 From a050ef0997313fb0d975843e536404a2ff7e9916 Mon Sep 17 00:00:00 2001 From: Maxim Sobolev Date: Thu, 18 Feb 2016 18:41:40 +0000 Subject: Right now, the "virtual hole" API feature of lseek(2) is very vaguely documented and easy to miss. 
At the same time, it's pretty important for anyone who is trying to use SEEK_HOLE/SEEK_DATA in real app. Try to bridge that gap by making that description more pronounced and also document how it affects failure codes. Reviewed by: kib Differential Revision: https://reviews.freebsd.org/D5162 --- lib/libc/sys/lseek.2 | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/lib/libc/sys/lseek.2 b/lib/libc/sys/lseek.2 index 349940a2a5b8..ed7513885c31 100644 --- a/lib/libc/sys/lseek.2 +++ b/lib/libc/sys/lseek.2 @@ -131,8 +131,14 @@ Applications can use .Dv SEEK_HOLE to optimise their behavior for ranges of zeros, but must not depend on it to find all such ranges in a file. +Each file is presented as having a zero-size virtual hole at the very +end of the file. The existence of a hole at the end of every data region allows for easy -programming and implies that a virtual hole exists at the end of the file. +programming and also provides compatibility to the original imlementation +in Solaris. +It also causes the current file size (i.e. end-of-file offset) to be returned +to indicate that there are no more holes past the supplied +.Fa offset . Applications should use .Fn fpathconf _PC_MIN_HOLE_SIZE or @@ -176,9 +182,11 @@ be negative for a non-character special file. For .Dv SEEK_DATA , there are no more data regions past the supplied offset. -For -.Dv SEEK_HOLE , -there are no more holes past the supplied offset. +Due to existence of the hole at the end of the file, for +.Dv SEEK_HOLE +this error is only returned when the +.Fa offset +already points to the end-of-file position. .It Bq Er EOVERFLOW The resulting file offset would be a value which cannot be represented correctly in an object of type -- cgit v1.2.3 From 24183025a54d7864ad74efb1656b8feb9c355793 Mon Sep 17 00:00:00 2001 From: Benjamin Kaduk Date: Thu, 18 Feb 2016 18:50:03 +0000 Subject: Bump .Dd for r295764 Also fix a spelling and grammar nit while here. 
--- lib/libc/sys/lseek.2 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/lib/libc/sys/lseek.2 b/lib/libc/sys/lseek.2 index ed7513885c31..017dc54227eb 100644 --- a/lib/libc/sys/lseek.2 +++ b/lib/libc/sys/lseek.2 @@ -28,7 +28,7 @@ .\" @(#)lseek.2 8.3 (Berkeley) 4/19/94 .\" $FreeBSD$ .\" -.Dd May 26, 2012 +.Dd February 18, 2016 .Dt LSEEK 2 .Os .Sh NAME @@ -134,9 +134,9 @@ find all such ranges in a file. Each file is presented as having a zero-size virtual hole at the very end of the file. The existence of a hole at the end of every data region allows for easy -programming and also provides compatibility to the original imlementation +programming and also provides compatibility to the original implementation in Solaris. -It also causes the current file size (i.e. end-of-file offset) to be returned +It also causes the current file size (i.e., end-of-file offset) to be returned to indicate that there are no more holes past the supplied .Fa offset . Applications should use -- cgit v1.2.3 From cd82d21b2e3cc91d31dab1e1195b1b7f85efd2c6 Mon Sep 17 00:00:00 2001 From: Gleb Smirnoff Date: Thu, 18 Feb 2016 19:05:30 +0000 Subject: Fix obvious typo, that lead to incorrect sorting. Found by: PVS-Studio --- sys/netpfil/ipfw/ip_fw_sockopt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/netpfil/ipfw/ip_fw_sockopt.c b/sys/netpfil/ipfw/ip_fw_sockopt.c index 070aed389f7e..25ac16eb31c2 100644 --- a/sys/netpfil/ipfw/ip_fw_sockopt.c +++ b/sys/netpfil/ipfw/ip_fw_sockopt.c @@ -2890,7 +2890,7 @@ compare_sh(const void *_a, const void *_b) if ((uintptr_t)a->handler < (uintptr_t)b->handler) return (-1); - else if ((uintptr_t)b->handler > (uintptr_t)b->handler) + else if ((uintptr_t)a->handler > (uintptr_t)b->handler) return (1); return (0); -- cgit v1.2.3 From c2a9e596ed2f576f8cd2a972fbc3224efc2c1d74 Mon Sep 17 00:00:00 2001 From: Jung-uk Kim Date: Thu, 18 Feb 2016 19:37:39 +0000 Subject: Silence VPS-Studio errors (V512). 
These buffer underflows are intentional. --- sys/compat/x86bios/x86bios.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sys/compat/x86bios/x86bios.c b/sys/compat/x86bios/x86bios.c index 5cd31e8a71bb..e86c965877f2 100644 --- a/sys/compat/x86bios/x86bios.c +++ b/sys/compat/x86bios/x86bios.c @@ -586,7 +586,7 @@ x86bios_call(struct x86regs *regs, uint16_t seg, uint16_t off) X86BIOS_TRACE(Calling 0x%06x, (seg << 4) + off, regs); mtx_lock(&x86bios_lock); - memcpy(&x86bios_emu.x86, regs, sizeof(*regs)); + memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs)); x86bios_fault = 0; spinlock_enter(); x86emu_exec_call(&x86bios_emu, seg, off); @@ -628,7 +628,7 @@ x86bios_intr(struct x86regs *regs, int intno) X86BIOS_TRACE(Calling INT 0x%02x, intno, regs); mtx_lock(&x86bios_lock); - memcpy(&x86bios_emu.x86, regs, sizeof(*regs)); + memcpy((struct x86regs *)&x86bios_emu.x86, regs, sizeof(*regs)); x86bios_fault = 0; spinlock_enter(); x86emu_exec_intr(&x86bios_emu, intno); -- cgit v1.2.3 From a59c2129f341a035a279b679a3221789d7fc6bf5 Mon Sep 17 00:00:00 2001 From: Alan Somers Date: Thu, 18 Feb 2016 20:08:01 +0000 Subject: Fix compiler warnings in iostat Raise WARNS from 1 to 6 (the default) Fix warnings: * Use C99 designated initializers for structs, and initialize all fields * Mark global variables as static * Mark unused function arguments * Be careful about signed/unsigned comparisons Reviewed by: eadler MFC after: 4 weeks Sponsored by: Spectra Logic Corp Differential Revision: https://reviews.freebsd.org/D5328 --- usr.sbin/iostat/Makefile | 2 -- usr.sbin/iostat/iostat.c | 47 +++++++++++++++++++++++++---------------------- 2 files changed, 25 insertions(+), 24 deletions(-) diff --git a/usr.sbin/iostat/Makefile b/usr.sbin/iostat/Makefile index dfbf69dc724c..4d74fe163d0b 100644 --- a/usr.sbin/iostat/Makefile +++ b/usr.sbin/iostat/Makefile @@ -6,6 +6,4 @@ MAN= iostat.8 LIBADD= devstat kvm m -WARNS?= 1 - .include diff --git a/usr.sbin/iostat/iostat.c 
b/usr.sbin/iostat/iostat.c index 170ce0d30b98..7610a7fecb5c 100644 --- a/usr.sbin/iostat/iostat.c +++ b/usr.sbin/iostat/iostat.c @@ -117,30 +117,34 @@ #include #include -struct nlist namelist[] = { +static struct nlist namelist[] = { #define X_TTY_NIN 0 - { "_tty_nin" }, + { .n_name = "_tty_nin", + .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_TTY_NOUT 1 - { "_tty_nout" }, + { .n_name = "_tty_nout", + .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_BOOTTIME 2 - { "_boottime" }, + { .n_name = "_boottime", + .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, #define X_END 2 - { NULL }, + { .n_name = NULL, + .n_type = 0, .n_other = 0, .n_desc = 0, .n_value = 0 }, }; #define IOSTAT_DEFAULT_ROWS 20 /* Traditional default `wrows' */ -struct statinfo cur, last; -int num_devices; -struct device_selection *dev_select; -int maxshowdevs; -volatile sig_atomic_t headercount; -volatile sig_atomic_t wresized; /* Tty resized, when non-zero. */ -volatile sig_atomic_t alarm_rang; -volatile sig_atomic_t return_requested; -unsigned short wrows; /* Current number of tty rows. */ -int dflag = 0, Iflag = 0, Cflag = 0, Tflag = 0, oflag = 0, Kflag = 0; -int xflag = 0, zflag = 0; +static struct statinfo cur, last; +static int num_devices; +static struct device_selection *dev_select; +static int maxshowdevs; +static volatile sig_atomic_t headercount; +static volatile sig_atomic_t wresized; /* Tty resized, when non-zero. */ +static volatile sig_atomic_t alarm_rang; +static volatile sig_atomic_t return_requested; +static unsigned short wrows; /* Current number of tty rows. */ +static int dflag = 0, Iflag = 0, Cflag = 0, Tflag = 0, oflag = 0, Kflag = 0; +static int xflag = 0, zflag = 0; /* local function declarations */ static void usage(void); @@ -650,7 +654,7 @@ main(int argc, char **argv) * Force a header to be prepended to the next output. 
*/ void -needhdr(int signo) +needhdr(int signo __unused) { headercount = 1; @@ -662,7 +666,7 @@ needhdr(int signo) * prepended to the next output. */ void -needresize(int signo) +needresize(int signo __unused) { wresized = 1; @@ -673,7 +677,7 @@ needresize(int signo) * Record the alarm so the main loop can break its sleep */ void -alarm_clock(int signo) +alarm_clock(int signo __unused) { alarm_rang = 1; } @@ -682,7 +686,7 @@ alarm_clock(int signo) * Request that the main loop exit soon */ void -needreturn(int signo) +needreturn(int signo __unused) { return_requested = 1; } @@ -998,8 +1002,7 @@ readvar(kvm_t *kd, const char *name, int nlid, void *ptr, size_t len) warnx("kvm_read(%s): %s", namelist[nlid].n_name, kvm_geterr(kd)); return (1); - } - if (nbytes != len) { + } else if ((size_t)nbytes != len) { warnx("kvm_read(%s): expected %zu bytes, got %zd bytes", namelist[nlid].n_name, len, nbytes); return (1); -- cgit v1.2.3 From 873e155c03b93c1c27669394f1846b12775cac02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Thu, 18 Feb 2016 20:20:36 +0000 Subject: Use __unused instead of casting to void to silence the unused parameter warning. Fix the indentation of 2 lines to conform with the style of this file. 
Submitted by: jhb --- usr.sbin/pciconf/cap.c | 10 ++-------- usr.sbin/pciconf/pciconf.c | 4 ++-- 2 files changed, 4 insertions(+), 10 deletions(-) diff --git a/usr.sbin/pciconf/cap.c b/usr.sbin/pciconf/cap.c index 25132cd2da8a..966ef9a0659f 100644 --- a/usr.sbin/pciconf/cap.c +++ b/usr.sbin/pciconf/cap.c @@ -117,12 +117,9 @@ cap_agp(int fd, struct pci_conf *p, uint8_t ptr) } static void -cap_vpd(int fd, struct pci_conf *p, uint8_t ptr) +cap_vpd(int fd __unused, struct pci_conf *p __unused, uint8_t ptr __unused) { - (void)fd; /* UNUSED */ - (void)p; /* UNUSED */ - (void)ptr; /* UNUSED */ printf("VPD"); } @@ -520,12 +517,9 @@ cap_msix(int fd, struct pci_conf *p, uint8_t ptr) } static void -cap_sata(int fd, struct pci_conf *p, uint8_t ptr) +cap_sata(int fd __unused, struct pci_conf *p __unused, uint8_t ptr __unused) { - (void)fd; /* UNUSED */ - (void)p; /* UNUSED */ - (void)ptr; /* UNUSED */ printf("SATA Index-Data Pair"); } diff --git a/usr.sbin/pciconf/pciconf.c b/usr.sbin/pciconf/pciconf.c index 85b5e870fed7..e743a891a2a6 100644 --- a/usr.sbin/pciconf/pciconf.c +++ b/usr.sbin/pciconf/pciconf.c @@ -915,8 +915,8 @@ parsesel(const char *str) ep += 3; i = 0; do { - selarr[i++] = strtoul(ep, &eppos, 10); - ep = eppos; + selarr[i++] = strtoul(ep, &eppos, 10); + ep = eppos; } while ((*ep == ':' || *ep == '.') && *++ep != '\0' && i < 4); if (i > 2) -- cgit v1.2.3 From 6df7c000d308b08575ffb3e3ea7bf5f16a2e8bbe Mon Sep 17 00:00:00 2001 From: Michael Tuexen Date: Thu, 18 Feb 2016 21:05:04 +0000 Subject: Fix reporting of mapped addressed in getpeername() and getsockname() for IPv6 SCTP sockets. This bugs were found because of an issue reported by PVS / D5245. 
--- sys/netinet6/sctp6_usrreq.c | 49 ++++++++++++++++++++++----------------------- 1 file changed, 24 insertions(+), 25 deletions(-) diff --git a/sys/netinet6/sctp6_usrreq.c b/sys/netinet6/sctp6_usrreq.c index 40c1b411697e..176fc97956aa 100644 --- a/sys/netinet6/sctp6_usrreq.c +++ b/sys/netinet6/sctp6_usrreq.c @@ -1008,7 +1008,9 @@ sctp6_getaddr(struct socket *so, struct sockaddr **addr) stcb = LIST_FIRST(&inp->sctp_asoc_list); if (stcb == NULL) { - goto notConn6; + SCTP_INP_RUNLOCK(inp); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); + return (ENOENT); } fnd = 0; sin_a6 = NULL; @@ -1025,7 +1027,9 @@ sctp6_getaddr(struct socket *so, struct sockaddr **addr) } if ((!fnd) || (sin_a6 == NULL)) { /* punt */ - goto notConn6; + SCTP_INP_RUNLOCK(inp); + SCTP_LTRACE_ERR_RET(inp, NULL, NULL, SCTP_FROM_SCTP6_USRREQ, ENOENT); + return (ENOENT); } vrf_id = inp->def_vrf_id; sctp_ifa = sctp_source_address_selection(inp, stcb, (sctp_route_t *) & net->ro, net, 0, vrf_id); @@ -1034,7 +1038,6 @@ sctp6_getaddr(struct socket *so, struct sockaddr **addr) } } else { /* For the bound all case you get back 0 */ - notConn6: memset(&sin6->sin6_addr, 0, sizeof(sin6->sin6_addr)); } } else { @@ -1135,10 +1138,6 @@ sctp6_peeraddr(struct socket *so, struct sockaddr **addr) static int sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) { -#ifdef INET - struct sockaddr *addr; - -#endif struct in6pcb *inp6 = sotoin6pcb(so); int error; @@ -1150,19 +1149,21 @@ sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) error = sctp6_getaddr(so, nam); #ifdef INET if (error) { + struct sockaddr_in6 *sin6; + /* try v4 next if v6 failed */ error = sctp_ingetaddr(so, nam); if (error) { return (error); } - addr = *nam; - /* if I'm V6ONLY, convert it to v4-mapped */ - if (SCTP_IPV6_V6ONLY(inp6)) { - struct sockaddr_in6 sin6; - - in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6); - memcpy(addr, &sin6, sizeof(struct sockaddr_in6)); + SCTP_MALLOC_SONAME(sin6, struct 
sockaddr_in6 *, sizeof *sin6); + if (sin6 == NULL) { + SCTP_FREE_SONAME(*nam); + return (ENOMEM); } + in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); + SCTP_FREE_SONAME(*nam); + *nam = (struct sockaddr *)sin6; } #endif return (error); @@ -1172,10 +1173,6 @@ sctp6_in6getaddr(struct socket *so, struct sockaddr **nam) static int sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) { -#ifdef INET - struct sockaddr *addr; - -#endif struct in6pcb *inp6 = sotoin6pcb(so); int error; @@ -1187,19 +1184,21 @@ sctp6_getpeeraddr(struct socket *so, struct sockaddr **nam) error = sctp6_peeraddr(so, nam); #ifdef INET if (error) { + struct sockaddr_in6 *sin6; + /* try v4 next if v6 failed */ error = sctp_peeraddr(so, nam); if (error) { return (error); } - addr = *nam; - /* if I'm V6ONLY, convert it to v4-mapped */ - if (SCTP_IPV6_V6ONLY(inp6)) { - struct sockaddr_in6 sin6; - - in6_sin_2_v4mapsin6((struct sockaddr_in *)addr, &sin6); - memcpy(addr, &sin6, sizeof(struct sockaddr_in6)); + SCTP_MALLOC_SONAME(sin6, struct sockaddr_in6 *, sizeof *sin6); + if (sin6 == NULL) { + SCTP_FREE_SONAME(*nam); + return (ENOMEM); } + in6_sin_2_v4mapsin6((struct sockaddr_in *)*nam, sin6); + SCTP_FREE_SONAME(*nam); + *nam = (struct sockaddr *)sin6; } #endif return (error); -- cgit v1.2.3 From fdc4c9d067d1a24020b5a6c8f195666655b29176 Mon Sep 17 00:00:00 2001 From: Michael Tuexen Date: Thu, 18 Feb 2016 21:21:45 +0000 Subject: Add some protection code. 
CID: 1331893 MFC after: 3 days --- sys/netinet/sctp_input.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/sys/netinet/sctp_input.c b/sys/netinet/sctp_input.c index ddb563c6e0cf..a205f2f5e609 100644 --- a/sys/netinet/sctp_input.c +++ b/sys/netinet/sctp_input.c @@ -365,8 +365,10 @@ sctp_process_init(struct sctp_init_chunk *cp, struct sctp_tcb *stcb) } SCTP_TCB_SEND_UNLOCK(stcb); asoc->streamoutcnt = asoc->pre_open_streams; - for (i = 0; i < asoc->streamoutcnt; i++) { - asoc->strmout[i].state = SCTP_STREAM_OPEN; + if (asoc->strmout) { + for (i = 0; i < asoc->streamoutcnt; i++) { + asoc->strmout[i].state = SCTP_STREAM_OPEN; + } } /* EY - nr_sack: initialize highest tsn in nr_mapping_array */ asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map; -- cgit v1.2.3 From 861f6d119637b22ed63027800689a6c60e4873a8 Mon Sep 17 00:00:00 2001 From: Michael Tuexen Date: Thu, 18 Feb 2016 21:33:10 +0000 Subject: Add protection code. MFC after: 3 days CID: 748858 --- sys/netinet/sctp_output.c | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/sys/netinet/sctp_output.c b/sys/netinet/sctp_output.c index 39cd388e5b78..a9001064e471 100644 --- a/sys/netinet/sctp_output.c +++ b/sys/netinet/sctp_output.c @@ -3222,12 +3222,14 @@ plan_d: } } #ifdef INET - if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { - stcb->asoc.scope.ipv4_local_scope = 1; - retried = 1; - goto again_with_private_addresses_allowed; - } else if (retried == 1) { - stcb->asoc.scope.ipv4_local_scope = 0; + if (stcb) { + if ((retried == 0) && (stcb->asoc.scope.ipv4_local_scope == 0)) { + stcb->asoc.scope.ipv4_local_scope = 1; + retried = 1; + goto again_with_private_addresses_allowed; + } else if (retried == 1) { + stcb->asoc.scope.ipv4_local_scope = 0; + } } #endif out: -- cgit v1.2.3 From 08e9106881e544d2cd4f17719ad9957e870b4512 Mon Sep 17 00:00:00 2001 From: Jason Helfman Date: Thu, 18 Feb 2016 22:40:24 +0000 Subject: - add dma(8) to examples in 
mailer.conf(5) PR: 207026 Submitted by: lifanov@mail.lifanov.com Approved by: wblock (mentor) Differential Revision: https://reviews.freebsd.org/D5259 --- share/man/man5/mailer.conf.5 | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/share/man/man5/mailer.conf.5 b/share/man/man5/mailer.conf.5 index dc55ae773e46..7bb57fc7d993 100644 --- a/share/man/man5/mailer.conf.5 +++ b/share/man/man5/mailer.conf.5 @@ -31,7 +31,7 @@ .\" .\" $FreeBSD$ .\" -.Dd October 8, 2010 +.Dd February 18, 2016 .Dt MAILER.CONF 5 .Os .Sh NAME @@ -139,7 +139,21 @@ Note the use of additional arguments. sendmail /usr/local/bin/mini_sendmail -srelayhost send-mail /usr/local/bin/mini_sendmail -srelayhost .Ed +.Pp +Using +.Xr dma 8 +to replace +.Xr sendmail 8 : +.Bd -literal -offset indent +# Execute dma instead of sendmail +sendmail /usr/libexec/dma +send-mail /usr/libexec/dma +mailq /usr/libexec/dma +newaliases /usr/libexec/dma +rmail /usr/libexec/dma +.Ed .Sh SEE ALSO +.Xr dma 8 , .Xr mail 1 , .Xr mailq 1 , .Xr newaliases 1 , -- cgit v1.2.3 From 7de2983dd0049ef3ae56f0ea87c24fe229382589 Mon Sep 17 00:00:00 2001 From: Jung-uk Kim Date: Thu, 18 Feb 2016 23:00:01 +0000 Subject: Silence VPS-Studio errors (V646). These is no functional change. --- sys/contrib/x86emu/x86emu.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sys/contrib/x86emu/x86emu.c b/sys/contrib/x86emu/x86emu.c index 41e7c4a8d944..bc7b3862b6bc 100644 --- a/sys/contrib/x86emu/x86emu.c +++ b/sys/contrib/x86emu/x86emu.c @@ -7003,7 +7003,7 @@ rol_byte(struct x86emu *emu, uint8_t d, uint8_t s) CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 6) & 0x2)), F_OF); - } if (s != 0) { + } else if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! 
*/ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); @@ -7029,7 +7029,7 @@ rol_word(struct x86emu *emu, uint16_t d, uint8_t s) CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 14) & 0x2)), F_OF); - } if (s != 0) { + } else if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); @@ -7055,7 +7055,7 @@ rol_long(struct x86emu *emu, uint32_t d, uint8_t s) CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 30) & 0x2)), F_OF); - } if (s != 0) { + } else if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); -- cgit v1.2.3 From 33b5cd539e1bd414d6452188aae60c094cd42fa6 Mon Sep 17 00:00:00 2001 From: Jung-uk Kim Date: Thu, 18 Feb 2016 23:03:37 +0000 Subject: Optimize ROL and ROR emulations and fix comments. --- sys/contrib/x86emu/x86emu.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/sys/contrib/x86emu/x86emu.c b/sys/contrib/x86emu/x86emu.c index bc7b3862b6bc..60a154952f27 100644 --- a/sys/contrib/x86emu/x86emu.c +++ b/sys/contrib/x86emu/x86emu.c @@ -6995,15 +6995,13 @@ rol_byte(struct x86emu *emu, uint8_t d, uint8_t s) mask = (1 << cnt) - 1; res |= (d >> (8 - cnt)) & mask; - /* set the new carry flag, Note that it is the low order bit - * of the result!!! */ - CONDITIONAL_SET_FLAG(res & 0x1, F_CF); /* OVERFLOW is set *IFF* s==1, then it is the xor of CF and * the most significant bit. Blecck. */ CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 6) & 0x2)), F_OF); - } else if (s != 0) { + } + if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! 
*/ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); @@ -7025,11 +7023,11 @@ rol_word(struct x86emu *emu, uint16_t d, uint8_t s) res = (d << cnt); mask = (1 << cnt) - 1; res |= (d >> (16 - cnt)) & mask; - CONDITIONAL_SET_FLAG(res & 0x1, F_CF); CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 14) & 0x2)), F_OF); - } else if (s != 0) { + } + if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); @@ -7051,11 +7049,11 @@ rol_long(struct x86emu *emu, uint32_t d, uint8_t s) res = (d << cnt); mask = (1 << cnt) - 1; res |= (d >> (32 - cnt)) & mask; - CONDITIONAL_SET_FLAG(res & 0x1, F_CF); CONDITIONAL_SET_FLAG(s == 1 && XOR2((res & 0x1) + ((res >> 30) & 0x2)), F_OF); - } else if (s != 0) { + } + if (s != 0) { /* set the new carry flag, Note that it is the low order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x1, F_CF); @@ -7093,14 +7091,12 @@ ror_byte(struct x86emu *emu, uint8_t d, uint8_t s) mask = (1 << (8 - cnt)) - 1; res |= (d >> (cnt)) & mask; - /* set the new carry flag, Note that it is the low order bit - * of the result!!! */ - CONDITIONAL_SET_FLAG(res & 0x80, F_CF); /* OVERFLOW is set *IFF* s==1, then it is the xor of the two * most significant bits. Blecck. */ CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 6), F_OF); - } else if (s != 0) { - /* set the new carry flag, Note that it is the low order bit + } + if (s != 0) { + /* set the new carry flag, Note that it is the high order bit * of the result!!! 
*/ CONDITIONAL_SET_FLAG(res & 0x80, F_CF); } @@ -7121,10 +7117,10 @@ ror_word(struct x86emu *emu, uint16_t d, uint8_t s) res = (d << (16 - cnt)); mask = (1 << (16 - cnt)) - 1; res |= (d >> (cnt)) & mask; - CONDITIONAL_SET_FLAG(res & 0x8000, F_CF); CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 14), F_OF); - } else if (s != 0) { - /* set the new carry flag, Note that it is the low order bit + } + if (s != 0) { + /* set the new carry flag, Note that it is the high order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x8000, F_CF); } @@ -7145,10 +7141,10 @@ ror_long(struct x86emu *emu, uint32_t d, uint8_t s) res = (d << (32 - cnt)); mask = (1 << (32 - cnt)) - 1; res |= (d >> (cnt)) & mask; - CONDITIONAL_SET_FLAG(res & 0x80000000, F_CF); CONDITIONAL_SET_FLAG(s == 1 && XOR2(res >> 30), F_OF); - } else if (s != 0) { - /* set the new carry flag, Note that it is the low order bit + } + if (s != 0) { + /* set the new carry flag, Note that it is the high order bit * of the result!!! */ CONDITIONAL_SET_FLAG(res & 0x80000000, F_CF); } -- cgit v1.2.3 From bfe2514a08dc7bd67878b396101b457e2778abbd Mon Sep 17 00:00:00 2001 From: Jung-uk Kim Date: Thu, 18 Feb 2016 23:32:11 +0000 Subject: Remove a bogus bzero() call. Found by: PVS-Studio --- sys/dev/acpica/acpi_package.c | 1 - 1 file changed, 1 deletion(-) diff --git a/sys/dev/acpica/acpi_package.c b/sys/dev/acpica/acpi_package.c index c1070cb38deb..448d35ba733b 100644 --- a/sys/dev/acpica/acpi_package.c +++ b/sys/dev/acpica/acpi_package.c @@ -80,7 +80,6 @@ acpi_PkgStr(ACPI_OBJECT *res, int idx, void *dst, size_t size) obj = &res->Package.Elements[idx]; if (obj == NULL) return (EINVAL); - bzero(dst, sizeof(dst)); switch (obj->Type) { case ACPI_TYPE_STRING: -- cgit v1.2.3 From 40bf7442fa53f017e909443ba47015ee8f0dc29e Mon Sep 17 00:00:00 2001 From: Navdeep Parhar Date: Fri, 19 Feb 2016 00:29:16 +0000 Subject: cxgbe: catch up with the latest hardware-related definitions. 
Obtained from: Chelsio Communications Sponsored by: Chelsio Communications --- sys/dev/cxgbe/adapter.h | 2 +- sys/dev/cxgbe/common/common.h | 1 + sys/dev/cxgbe/common/t4_hw.h | 13 +- sys/dev/cxgbe/common/t4_msg.h | 629 +- sys/dev/cxgbe/common/t4_regs.h | 20588 +++++++++++++++++++++++++++++++- sys/dev/cxgbe/common/t4_regs_values.h | 73 +- sys/dev/cxgbe/common/t4_tcb.h | 13 +- sys/dev/cxgbe/tom/t4_connect.c | 12 +- 8 files changed, 21241 insertions(+), 90 deletions(-) diff --git a/sys/dev/cxgbe/adapter.h b/sys/dev/cxgbe/adapter.h index d7a6f76199a9..829457b07124 100644 --- a/sys/dev/cxgbe/adapter.h +++ b/sys/dev/cxgbe/adapter.h @@ -813,7 +813,7 @@ struct adapter { struct mtx regwin_lock; /* for indirect reads and memory windows */ an_handler_t an_handler __aligned(CACHE_LINE_SIZE); - fw_msg_handler_t fw_msg_handler[5]; /* NUM_FW6_TYPES */ + fw_msg_handler_t fw_msg_handler[7]; /* NUM_FW6_TYPES */ cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */ #ifdef INVARIANTS diff --git a/sys/dev/cxgbe/common/common.h b/sys/dev/cxgbe/common/common.h index 820354ab696f..a709f7301945 100644 --- a/sys/dev/cxgbe/common/common.h +++ b/sys/dev/cxgbe/common/common.h @@ -47,6 +47,7 @@ enum { MEM_EDC0, MEM_EDC1, MEM_MC, MEM_MC0 = MEM_MC, MEM_MC1 }; enum { MEMWIN0_APERTURE = 2048, MEMWIN0_BASE = 0x1b800, + MEMWIN1_APERTURE = 32768, MEMWIN1_BASE = 0x28000, diff --git a/sys/dev/cxgbe/common/t4_hw.h b/sys/dev/cxgbe/common/t4_hw.h index 34f462cb19e6..c1a5ce6be3b2 100644 --- a/sys/dev/cxgbe/common/t4_hw.h +++ b/sys/dev/cxgbe/common/t4_hw.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011 Chelsio Communications, Inc. + * Copyright (c) 2011, 2016 Chelsio Communications, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -265,6 +265,12 @@ enum { FLASH_CFG_START = FLASH_START(FLASH_CFG_START_SEC), FLASH_CFG_MAX_SIZE = FLASH_MAX_SIZE(FLASH_CFG_NSECS), + /* + * We don't support FLASH devices which can't support the full + * standard set of sections which we need for normal operations. + */ + FLASH_MIN_SIZE = FLASH_CFG_START + FLASH_CFG_MAX_SIZE, + /* * Sectors 32-63 are reserved for FLASH failover. */ @@ -273,4 +279,9 @@ enum { #undef FLASH_START #undef FLASH_MAX_SIZE +#define S_SGE_TIMESTAMP 0 +#define M_SGE_TIMESTAMP 0xfffffffffffffffULL +#define V_SGE_TIMESTAMP(x) ((__u64)(x) << S_SGE_TIMESTAMP) +#define G_SGE_TIMESTAMP(x) (((__u64)(x) >> S_SGE_TIMESTAMP) & M_SGE_TIMESTAMP) + #endif /* __T4_HW_H */ diff --git a/sys/dev/cxgbe/common/t4_msg.h b/sys/dev/cxgbe/common/t4_msg.h index 2d8460492b2c..ea48fc6dde9d 100644 --- a/sys/dev/cxgbe/common/t4_msg.h +++ b/sys/dev/cxgbe/common/t4_msg.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011 Chelsio Communications, Inc. + * Copyright (c) 2011, 2016 Chelsio Communications, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -55,8 +55,9 @@ enum { CPL_BARRIER = 0x18, CPL_TID_RELEASE = 0x1A, CPL_TAG_READ_REQ = 0x1B, + CPL_SRQ_TABLE_REQ = 0x1C, CPL_TX_PKT_FSO = 0x1E, - CPL_TX_PKT_ISO = 0x1F, + CPL_TX_DATA_ISO = 0x1F, CPL_CLOSE_LISTSRV_RPL = 0x20, CPL_ERROR = 0x21, @@ -97,7 +98,7 @@ enum { CPL_RX_DATA_DDP = 0x42, CPL_SMT_READ_RPL = 0x43, CPL_PASS_ACCEPT_REQ = 0x44, - CPL_RX2TX_PKT = 0x45, + CPL_RX_ISCSI_CMP = 0x45, CPL_RX_FCOE_DDP = 0x46, CPL_FCOE_HDR = 0x47, CPL_T5_TRACE_PKT = 0x48, @@ -124,6 +125,7 @@ enum { CPL_RDMA_ATOMIC_RPL = 0xAB, CPL_RDMA_IMM_DATA = 0xAC, CPL_RDMA_IMM_DATA_SE = 0xAD, + CPL_RX_MPS_PKT = 0xAF, CPL_TRACE_PKT = 0xB0, CPL_RX2TX_DATA = 0xB1, @@ -133,9 +135,11 @@ enum { CPL_FW4_MSG = 0xC0, CPL_FW4_PLD = 0xC1, CPL_FW4_ACK = 0xC3, + CPL_SRQ_TABLE_RPL = 0xCC, CPL_FW6_MSG = 0xE0, CPL_FW6_PLD = 0xE1, + CPL_TX_TNL_LSO = 0xEC, CPL_TX_PKT_LSO = 0xED, CPL_TX_PKT_XT = 0xEE, @@ -145,6 +149,7 @@ enum { enum CPL_error { CPL_ERR_NONE = 0, CPL_ERR_TCAM_PARITY = 1, + CPL_ERR_TCAM_MISS = 2, CPL_ERR_TCAM_FULL = 3, CPL_ERR_BAD_LENGTH = 15, CPL_ERR_BAD_ROUTE = 18, @@ -164,8 +169,24 @@ enum CPL_error { CPL_ERR_WAIT_ARP_RPL = 41, CPL_ERR_ABORT_FAILED = 42, CPL_ERR_IWARP_FLM = 50, + CPL_CONTAINS_READ_RPL = 60, + CPL_CONTAINS_WRITE_RPL = 61, }; +/* + * Some of the error codes above implicitly indicate that there is no TID + * allocated with the result of an ACT_OPEN. We use this predicate to make + * that explicit. 
+ */ +static inline int act_open_has_tid(int status) +{ + return (status != CPL_ERR_TCAM_PARITY && + status != CPL_ERR_TCAM_MISS && + status != CPL_ERR_TCAM_FULL && + status != CPL_ERR_CONN_EXIST_SYNRECV && + status != CPL_ERR_CONN_EXIST); +} + enum { CPL_CONN_POLICY_AUTO = 0, CPL_CONN_POLICY_ASK = 1, @@ -337,6 +358,12 @@ struct rss_header { #define M_QNUM 0xFFFF #define G_QNUM(x) (((x) >> S_QNUM) & M_QNUM) +#if defined(RSS_HDR_VLD) || defined(CHELSIO_FW) +# define RSS_HDR struct rss_header rss_hdr; +#else +# define RSS_HDR +#endif + #ifndef CHELSIO_FW struct work_request_hdr { __be32 wr_hi; @@ -358,11 +385,9 @@ struct work_request_hdr { # define WR_HDR struct work_request_hdr wr # define WR_HDR_SIZE sizeof(struct work_request_hdr) -# define RSS_HDR #else # define WR_HDR # define WR_HDR_SIZE 0 -# define RSS_HDR struct rss_header rss_hdr; #endif /* option 0 fields */ @@ -480,6 +505,11 @@ struct work_request_hdr { #define V_CONN_POLICY(x) ((x) << S_CONN_POLICY) #define G_CONN_POLICY(x) (((x) >> S_CONN_POLICY) & M_CONN_POLICY) +#define S_T5_FILT_INFO 24 +#define M_T5_FILT_INFO 0xffffffffffULL +#define V_T5_FILT_INFO(x) ((x) << S_T5_FILT_INFO) +#define G_T5_FILT_INFO(x) (((x) >> S_T5_FILT_INFO) & M_T5_FILT_INFO) + #define S_FILT_INFO 28 #define M_FILT_INFO 0xfffffffffULL #define V_FILT_INFO(x) ((x) << S_FILT_INFO) @@ -518,6 +548,10 @@ struct work_request_hdr { #define V_CONG_CNTRL_VALID(x) ((x) << S_CONG_CNTRL_VALID) #define F_CONG_CNTRL_VALID V_CONG_CNTRL_VALID(1U) +#define S_T5_ISS 18 +#define V_T5_ISS(x) ((x) << S_T5_ISS) +#define F_T5_ISS V_T5_ISS(1U) + #define S_PACE_VALID 19 #define V_PACE_VALID(x) ((x) << S_PACE_VALID) #define F_PACE_VALID V_PACE_VALID(1U) @@ -617,11 +651,30 @@ struct cpl_pass_establish { #define G_PASS_OPEN_TOS(x) (((x) >> S_PASS_OPEN_TOS) & M_PASS_OPEN_TOS) /* cpl_pass_establish.tcp_opt fields (also applies to act_open_establish) */ -#define G_TCPOPT_WSCALE_OK(x) (((x) >> 5) & 1) -#define G_TCPOPT_SACK(x) (((x) >> 6) & 1) -#define 
G_TCPOPT_TSTAMP(x) (((x) >> 7) & 1) -#define G_TCPOPT_SND_WSCALE(x) (((x) >> 8) & 0xf) -#define G_TCPOPT_MSS(x) (((x) >> 12) & 0xf) +#define S_TCPOPT_WSCALE_OK 5 +#define M_TCPOPT_WSCALE_OK 0x1 +#define V_TCPOPT_WSCALE_OK(x) ((x) << S_TCPOPT_WSCALE_OK) +#define G_TCPOPT_WSCALE_OK(x) (((x) >> S_TCPOPT_WSCALE_OK) & M_TCPOPT_WSCALE_OK) + +#define S_TCPOPT_SACK 6 +#define M_TCPOPT_SACK 0x1 +#define V_TCPOPT_SACK(x) ((x) << S_TCPOPT_SACK) +#define G_TCPOPT_SACK(x) (((x) >> S_TCPOPT_SACK) & M_TCPOPT_SACK) + +#define S_TCPOPT_TSTAMP 7 +#define M_TCPOPT_TSTAMP 0x1 +#define V_TCPOPT_TSTAMP(x) ((x) << S_TCPOPT_TSTAMP) +#define G_TCPOPT_TSTAMP(x) (((x) >> S_TCPOPT_TSTAMP) & M_TCPOPT_TSTAMP) + +#define S_TCPOPT_SND_WSCALE 8 +#define M_TCPOPT_SND_WSCALE 0xF +#define V_TCPOPT_SND_WSCALE(x) ((x) << S_TCPOPT_SND_WSCALE) +#define G_TCPOPT_SND_WSCALE(x) (((x) >> S_TCPOPT_SND_WSCALE) & M_TCPOPT_SND_WSCALE) + +#define S_TCPOPT_MSS 12 +#define M_TCPOPT_MSS 0xF +#define V_TCPOPT_MSS(x) ((x) << S_TCPOPT_MSS) +#define G_TCPOPT_MSS(x) (((x) >> S_TCPOPT_MSS) & M_TCPOPT_MSS) struct cpl_pass_accept_req { RSS_HDR @@ -646,16 +699,29 @@ struct cpl_pass_accept_req { #define V_TCP_HDR_LEN(x) ((x) << S_TCP_HDR_LEN) #define G_TCP_HDR_LEN(x) (((x) >> S_TCP_HDR_LEN) & M_TCP_HDR_LEN) +#define S_T6_TCP_HDR_LEN 8 +#define V_T6_TCP_HDR_LEN(x) ((x) << S_T6_TCP_HDR_LEN) +#define G_T6_TCP_HDR_LEN(x) (((x) >> S_T6_TCP_HDR_LEN) & M_TCP_HDR_LEN) + #define S_IP_HDR_LEN 16 #define M_IP_HDR_LEN 0x3FF #define V_IP_HDR_LEN(x) ((x) << S_IP_HDR_LEN) #define G_IP_HDR_LEN(x) (((x) >> S_IP_HDR_LEN) & M_IP_HDR_LEN) +#define S_T6_IP_HDR_LEN 14 +#define V_T6_IP_HDR_LEN(x) ((x) << S_T6_IP_HDR_LEN) +#define G_T6_IP_HDR_LEN(x) (((x) >> S_T6_IP_HDR_LEN) & M_IP_HDR_LEN) + #define S_ETH_HDR_LEN 26 #define M_ETH_HDR_LEN 0x3F #define V_ETH_HDR_LEN(x) ((x) << S_ETH_HDR_LEN) #define G_ETH_HDR_LEN(x) (((x) >> S_ETH_HDR_LEN) & M_ETH_HDR_LEN) +#define S_T6_ETH_HDR_LEN 24 +#define M_T6_ETH_HDR_LEN 0xFF +#define V_T6_ETH_HDR_LEN(x) ((x) 
<< S_T6_ETH_HDR_LEN) +#define G_T6_ETH_HDR_LEN(x) (((x) >> S_T6_ETH_HDR_LEN) & M_T6_ETH_HDR_LEN) + /* cpl_pass_accept_req.l2info fields */ #define S_SYN_MAC_IDX 0 #define M_SYN_MAC_IDX 0x1FF @@ -684,7 +750,10 @@ struct cpl_t5_pass_accept_rpl { __be32 opt2; __be64 opt0; __be32 iss; - __be32 rsvd; + union { + __be32 rsvd; /* T5 */ + __be32 opt3; /* T6 */ + } u; }; struct cpl_act_open_req { @@ -716,6 +785,26 @@ struct cpl_t5_act_open_req { __be64 params; }; +struct cpl_t6_act_open_req { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be32 local_ip; + __be32 peer_ip; + __be64 opt0; + __be32 iss; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + +/* cpl_{t5,t6}_act_open_req.params field */ +#define S_AOPEN_FCOEMASK 0 +#define V_AOPEN_FCOEMASK(x) ((x) << S_AOPEN_FCOEMASK) +#define F_AOPEN_FCOEMASK V_AOPEN_FCOEMASK(1U) + struct cpl_act_open_req6 { WR_HDR; union opcode_tid ot; @@ -745,6 +834,23 @@ struct cpl_t5_act_open_req6 { __be64 params; }; +struct cpl_t6_act_open_req6 { + WR_HDR; + union opcode_tid ot; + __be16 local_port; + __be16 peer_port; + __be64 local_ip_hi; + __be64 local_ip_lo; + __be64 peer_ip_hi; + __be64 peer_ip_lo; + __be64 opt0; + __be32 iss; + __be32 opt2; + __be64 params; + __be32 rsvd2; + __be32 opt3; +}; + struct cpl_act_open_rpl { RSS_HDR union opcode_tid ot; @@ -887,6 +993,11 @@ struct cpl_abort_req_rss { __u8 status; }; +/* cpl_abort_req status command code in case of T6, + * bit[0] specifies whether to send RST (0) to remote peer or suppress it (1) + * bit[1] indicates ABORT_REQ was sent after a CLOSE_CON_REQ + * bit[2] specifies whether to disable the mmgr (1) or not (0) + */ struct cpl_abort_req { WR_HDR; union opcode_tid ot; @@ -978,10 +1089,14 @@ struct cpl_tx_data { #define G_TX_ULP_SUBMODE(x) (((x) >> S_TX_ULP_SUBMODE) & M_TX_ULP_SUBMODE) #define S_TX_ULP_MODE 10 -#define M_TX_ULP_MODE 0xF +#define M_TX_ULP_MODE 0x7 #define V_TX_ULP_MODE(x) ((x) << S_TX_ULP_MODE) #define G_TX_ULP_MODE(x) (((x) 
>> S_TX_ULP_MODE) & M_TX_ULP_MODE) +#define S_TX_FORCE 13 +#define V_TX_FORCE(x) ((x) << S_TX_FORCE) +#define F_TX_FORCE V_TX_FORCE(1U) + #define S_TX_SHOVE 14 #define V_TX_SHOVE(x) ((x) << S_TX_SHOVE) #define F_TX_SHOVE V_TX_SHOVE(1U) @@ -1006,6 +1121,10 @@ struct cpl_tx_data { #define V_TX_TNL(x) ((x) << S_TX_TNL) #define F_TX_TNL V_TX_TNL(1U) +#define S_T6_TX_FORCE 20 +#define V_T6_TX_FORCE(x) ((x) << S_T6_TX_FORCE) +#define F_T6_TX_FORCE V_T6_TX_FORCE(1U) + /* additional tx_data_wr.flags fields */ #define S_TX_CPU_IDX 0 #define M_TX_CPU_IDX 0x3F @@ -1142,6 +1261,10 @@ struct cpl_tx_pkt { #define V_TXPKT_IPHDR_LEN(x) ((__u64)(x) << S_TXPKT_IPHDR_LEN) #define G_TXPKT_IPHDR_LEN(x) (((x) >> S_TXPKT_IPHDR_LEN) & M_TXPKT_IPHDR_LEN) +#define M_T6_TXPKT_IPHDR_LEN 0xFFF +#define G_T6_TXPKT_IPHDR_LEN(x) \ + (((x) >> S_TXPKT_IPHDR_LEN) & M_T6_TXPKT_IPHDR_LEN) + #define S_TXPKT_CSUM_LOC 30 #define M_TXPKT_CSUM_LOC 0x3FF #define V_TXPKT_CSUM_LOC(x) ((__u64)(x) << S_TXPKT_CSUM_LOC) @@ -1152,6 +1275,12 @@ struct cpl_tx_pkt { #define V_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_TXPKT_ETHHDR_LEN) #define G_TXPKT_ETHHDR_LEN(x) (((x) >> S_TXPKT_ETHHDR_LEN) & M_TXPKT_ETHHDR_LEN) +#define S_T6_TXPKT_ETHHDR_LEN 32 +#define M_T6_TXPKT_ETHHDR_LEN 0xFF +#define V_T6_TXPKT_ETHHDR_LEN(x) ((__u64)(x) << S_T6_TXPKT_ETHHDR_LEN) +#define G_T6_TXPKT_ETHHDR_LEN(x) \ + (((x) >> S_T6_TXPKT_ETHHDR_LEN) & M_T6_TXPKT_ETHHDR_LEN) + #define S_TXPKT_CSUM_TYPE 40 #define M_TXPKT_CSUM_TYPE 0xF #define V_TXPKT_CSUM_TYPE(x) ((__u64)(x) << S_TXPKT_CSUM_TYPE) @@ -1296,38 +1425,83 @@ struct cpl_iscsi_hdr_no_rss { }; struct cpl_tx_data_iso { - WR_HDR; - __be32 iso_ctrl; - __u8 rsvd; + __be32 op_to_scsi; + __u8 reserved1; __u8 ahs_len; - __be16 mss; + __be16 mpdu; __be32 burst_size; __be32 len; + __be32 reserved2_seglen_offset; + __be32 datasn_offset; + __be32 buffer_offset; + __be32 reserved3; + /* encapsulated CPL_TX_DATA follows here */ }; -/* cpl_tx_data_iso.iso_ctrl fields different from cpl_tx_pkt_lso.lso_ctrl 
*/ -#define S_ISO_CPLHDR_LEN 18 -#define M_ISO_CPLHDR_LEN 0xF -#define V_ISO_CPLHDR_LEN(x) ((x) << S_ISO_CPLHDR_LEN) -#define G_ISO_CPLHDR_LEN(x) (((x) >> S_ISO_CPLHDR_LEN) & M_ISO_CPLHDR_LEN) - -#define S_ISO_HDR_CRC 17 -#define V_ISO_HDR_CRC(x) ((x) << S_ISO_HDR_CRC) -#define F_ISO_HDR_CRC V_ISO_HDR_CRC(1U) - -#define S_ISO_DATA_CRC 16 -#define V_ISO_DATA_CRC(x) ((x) << S_ISO_DATA_CRC) -#define F_ISO_DATA_CRC V_ISO_DATA_CRC(1U) - -#define S_ISO_IMD_DATA_EN 15 -#define V_ISO_IMD_DATA_EN(x) ((x) << S_ISO_IMD_DATA_EN) -#define F_ISO_IMD_DATA_EN V_ISO_IMD_DATA_EN(1U) - -#define S_ISO_PDU_TYPE 13 -#define M_ISO_PDU_TYPE 0x3 -#define V_ISO_PDU_TYPE(x) ((x) << S_ISO_PDU_TYPE) -#define G_ISO_PDU_TYPE(x) (((x) >> S_ISO_PDU_TYPE) & M_ISO_PDU_TYPE) +/* cpl_tx_data_iso.op_to_scsi fields */ +#define S_CPL_TX_DATA_ISO_OP 24 +#define M_CPL_TX_DATA_ISO_OP 0xff +#define V_CPL_TX_DATA_ISO_OP(x) ((x) << S_CPL_TX_DATA_ISO_OP) +#define G_CPL_TX_DATA_ISO_OP(x) \ + (((x) >> S_CPL_TX_DATA_ISO_OP) & M_CPL_TX_DATA_ISO_OP) + +#define S_CPL_TX_DATA_ISO_FIRST 23 +#define M_CPL_TX_DATA_ISO_FIRST 0x1 +#define V_CPL_TX_DATA_ISO_FIRST(x) ((x) << S_CPL_TX_DATA_ISO_FIRST) +#define G_CPL_TX_DATA_ISO_FIRST(x) \ + (((x) >> S_CPL_TX_DATA_ISO_FIRST) & M_CPL_TX_DATA_ISO_FIRST) +#define F_CPL_TX_DATA_ISO_FIRST V_CPL_TX_DATA_ISO_FIRST(1U) + +#define S_CPL_TX_DATA_ISO_LAST 22 +#define M_CPL_TX_DATA_ISO_LAST 0x1 +#define V_CPL_TX_DATA_ISO_LAST(x) ((x) << S_CPL_TX_DATA_ISO_LAST) +#define G_CPL_TX_DATA_ISO_LAST(x) \ + (((x) >> S_CPL_TX_DATA_ISO_LAST) & M_CPL_TX_DATA_ISO_LAST) +#define F_CPL_TX_DATA_ISO_LAST V_CPL_TX_DATA_ISO_LAST(1U) + +#define S_CPL_TX_DATA_ISO_CPLHDRLEN 21 +#define M_CPL_TX_DATA_ISO_CPLHDRLEN 0x1 +#define V_CPL_TX_DATA_ISO_CPLHDRLEN(x) ((x) << S_CPL_TX_DATA_ISO_CPLHDRLEN) +#define G_CPL_TX_DATA_ISO_CPLHDRLEN(x) \ + (((x) >> S_CPL_TX_DATA_ISO_CPLHDRLEN) & M_CPL_TX_DATA_ISO_CPLHDRLEN) +#define F_CPL_TX_DATA_ISO_CPLHDRLEN V_CPL_TX_DATA_ISO_CPLHDRLEN(1U) + +#define S_CPL_TX_DATA_ISO_HDRCRC 20 
+#define M_CPL_TX_DATA_ISO_HDRCRC 0x1 +#define V_CPL_TX_DATA_ISO_HDRCRC(x) ((x) << S_CPL_TX_DATA_ISO_HDRCRC) +#define G_CPL_TX_DATA_ISO_HDRCRC(x) \ + (((x) >> S_CPL_TX_DATA_ISO_HDRCRC) & M_CPL_TX_DATA_ISO_HDRCRC) +#define F_CPL_TX_DATA_ISO_HDRCRC V_CPL_TX_DATA_ISO_HDRCRC(1U) + +#define S_CPL_TX_DATA_ISO_PLDCRC 19 +#define M_CPL_TX_DATA_ISO_PLDCRC 0x1 +#define V_CPL_TX_DATA_ISO_PLDCRC(x) ((x) << S_CPL_TX_DATA_ISO_PLDCRC) +#define G_CPL_TX_DATA_ISO_PLDCRC(x) \ + (((x) >> S_CPL_TX_DATA_ISO_PLDCRC) & M_CPL_TX_DATA_ISO_PLDCRC) +#define F_CPL_TX_DATA_ISO_PLDCRC V_CPL_TX_DATA_ISO_PLDCRC(1U) + +#define S_CPL_TX_DATA_ISO_IMMEDIATE 18 +#define M_CPL_TX_DATA_ISO_IMMEDIATE 0x1 +#define V_CPL_TX_DATA_ISO_IMMEDIATE(x) ((x) << S_CPL_TX_DATA_ISO_IMMEDIATE) +#define G_CPL_TX_DATA_ISO_IMMEDIATE(x) \ + (((x) >> S_CPL_TX_DATA_ISO_IMMEDIATE) & M_CPL_TX_DATA_ISO_IMMEDIATE) +#define F_CPL_TX_DATA_ISO_IMMEDIATE V_CPL_TX_DATA_ISO_IMMEDIATE(1U) + +#define S_CPL_TX_DATA_ISO_SCSI 16 +#define M_CPL_TX_DATA_ISO_SCSI 0x3 +#define V_CPL_TX_DATA_ISO_SCSI(x) ((x) << S_CPL_TX_DATA_ISO_SCSI) +#define G_CPL_TX_DATA_ISO_SCSI(x) \ + (((x) >> S_CPL_TX_DATA_ISO_SCSI) & M_CPL_TX_DATA_ISO_SCSI) + +/* cpl_tx_data_iso.reserved2_seglen_offset fields */ +#define S_CPL_TX_DATA_ISO_SEGLEN_OFFSET 0 +#define M_CPL_TX_DATA_ISO_SEGLEN_OFFSET 0xffffff +#define V_CPL_TX_DATA_ISO_SEGLEN_OFFSET(x) \ + ((x) << S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) +#define G_CPL_TX_DATA_ISO_SEGLEN_OFFSET(x) \ + (((x) >> S_CPL_TX_DATA_ISO_SEGLEN_OFFSET) & \ + M_CPL_TX_DATA_ISO_SEGLEN_OFFSET) struct cpl_iscsi_hdr { RSS_HDR @@ -1400,6 +1574,19 @@ struct cpl_fcoe_hdr { __be32 param; }; +/* cpl_fcoe_hdr.rctl_fctl fields */ +#define S_FCOE_FCHDR_RCTL 24 +#define M_FCOE_FCHDR_RCTL 0xff +#define V_FCOE_FCHDR_RCTL(x) ((x) << S_FCOE_FCHDR_RCTL) +#define G_FCOE_FCHDR_RCTL(x) \ + (((x) >> S_FCOE_FCHDR_RCTL) & M_FCOE_FCHDR_RCTL) + +#define S_FCOE_FCHDR_FCTL 0 +#define M_FCOE_FCHDR_FCTL 0xffffff +#define V_FCOE_FCHDR_FCTL(x) ((x) << S_FCOE_FCHDR_FCTL) 
+#define G_FCOE_FCHDR_FCTL(x) \ + (((x) >> S_FCOE_FCHDR_FCTL) & M_FCOE_FCHDR_FCTL) + struct cpl_fcoe_data { RSS_HDR union opcode_tid ot; @@ -1527,6 +1714,19 @@ struct cpl_rx_iscsi_dif { __u8 rsvd1[4]; }; +struct cpl_rx_iscsi_cmp { + RSS_HDR + union opcode_tid ot; + __be16 pdu_len_ddp; + __be16 len; + __be32 seq; + __be16 urg; + __u8 rsvd; + __u8 status; + __be32 ulp_crc; + __be32 ddpvld; +}; + struct cpl_rx_fcoe_dif { RSS_HDR union opcode_tid ot; @@ -1671,6 +1871,9 @@ struct cpl_rx_pkt { #define V_RX_T5_ETHHDR_LEN(x) ((x) << S_RX_T5_ETHHDR_LEN) #define G_RX_T5_ETHHDR_LEN(x) (((x) >> S_RX_T5_ETHHDR_LEN) & M_RX_T5_ETHHDR_LEN) +#define M_RX_T6_ETHHDR_LEN 0xFF +#define G_RX_T6_ETHHDR_LEN(x) (((x) >> S_RX_ETHHDR_LEN) & M_RX_T6_ETHHDR_LEN) + #define S_RX_PKTYPE 5 #define M_RX_PKTYPE 0x7 #define V_RX_PKTYPE(x) ((x) << S_RX_PKTYPE) @@ -1801,6 +2004,65 @@ struct cpl_rx_pkt { #define V_RXERR_PING(x) ((x) << S_RXERR_PING) #define F_RXERR_PING V_RXERR_PING(1U) +/* In T6, rx_pkt.err_vec indicates + * RxError Error vector (16b) or + * Encapsulating header length (8b), + * Outer encapsulation type (2b) and + * compressed error vector (6b) if CRxPktEnc is + * enabled in TP_OUT_CONFIG + */ + +#define S_T6_COMPR_RXERR_VEC 0 +#define M_T6_COMPR_RXERR_VEC 0x3F +#define V_T6_COMPR_RXERR_VEC(x) ((x) << S_T6_COMPR_RXERR_LEN) +#define G_T6_COMPR_RXERR_VEC(x) \ + (((x) >> S_T6_COMPR_RXERR_VEC) & M_T6_COMPR_RXERR_VEC) + +#define S_T6_COMPR_RXERR_MAC 0 +#define V_T6_COMPR_RXERR_MAC(x) ((x) << S_T6_COMPR_RXERR_MAC) +#define F_T6_COMPR_RXERR_MAC V_T6_COMPR_RXERR_MAC(1U) + +/* Logical OR of RX_ERROR_PKT_LEN, RX_ERROR_TCP_HDR_LEN + * RX_ERROR_IP_HDR_LEN, RX_ERROR_ETH_HDR_LEN + */ +#define S_T6_COMPR_RXERR_LEN 1 +#define V_T6_COMPR_RXERR_LEN(x) ((x) << S_COMPR_T6_RXERR_LEN) +#define F_T6_COMPR_RXERR_LEN V_COMPR_T6_RXERR_LEN(1U) + +#define S_T6_COMPR_RXERR_TCP_OPT 2 +#define V_T6_COMPR_RXERR_TCP_OPT(x) ((x) << S_T6_COMPR_RXERR_TCP_OPT) +#define F_T6_COMPR_RXERR_TCP_OPT V_T6_COMPR_RXERR_TCP_OPT(1U) 
+ +#define S_T6_COMPR_RXERR_IPV6_EXT 3 +#define V_T6_COMPR_RXERR_IPV6_EXT(x) ((x) << S_T6_COMPR_RXERR_IPV6_EXT) +#define F_T6_COMPR_RXERR_IPV6_EXT V_T6_COMPR_RXERR_IPV6_EXT(1U) + +/* Logical OR of RX_ERROR_CSUM, RX_ERROR_CSIP */ +#define S_T6_COMPR_RXERR_SUM 4 +#define V_T6_COMPR_RXERR_SUM(x) ((x) << S_T6_COMPR_RXERR_SUM) +#define F_T6_COMPR_RXERR_SUM V_T6_COMPR_RXERR_SUM(1U) + +/* Logical OR of RX_ERROR_FPMA, RX_ERROR_PING_DROP, + * RX_ERROR_ATTACK, RX_ERROR_FRAG,RX_ERROR_IPVERSION + */ +#define S_T6_COMPR_RXERR_MISC 5 +#define V_T6_COMPR_RXERR_MISC(x) ((x) << S_T6_COMPR_RXERR_MISC) +#define F_T6_COMPR_RXERR_MISC V_T6_COMPR_RXERR_MISC(1U) + +#define S_T6_RX_TNL_TYPE 6 +#define M_T6_RX_TNL_TYPE 0x3 +#define V_T6_RX_TNL_TYPE(x) ((x) << S_T6_RX_TNL_TYPE) +#define G_T6_RX_TNL_TYPE(x) (((x) >> S_T6_RX_TNL_TYPE) & M_T6_RX_TNL_TYPE) + +#define RX_PKT_TNL_TYPE_NVGRE 1 +#define RX_PKT_TNL_TYPE_VXLAN 2 +#define RX_PKT_TNL_TYPE_GENEVE 3 + +#define S_T6_RX_TNLHDR_LEN 8 +#define M_T6_RX_TNLHDR_LEN 0xFF +#define V_T6_RX_TNLHDR_LEN(x) ((x) << S_T6_RX_TNLHDR_LEN) +#define G_T6_RX_TNLHDR_LEN(x) (((x) >> S_T6_RX_TNLHDR_LEN) & M_T6_RX_TNLHDR_LEN) + struct cpl_trace_pkt { RSS_HDR __u8 opcode; @@ -1996,6 +2258,51 @@ struct cpl_l2t_read_rpl { __u8 dst_mac[6]; }; +struct cpl_srq_table_req { + WR_HDR; + union opcode_tid ot; + __u8 status; + __u8 rsvd[2]; + __u8 idx; + __be64 rsvd_pdid; + __be32 qlen_qbase; + __be16 cur_msn; + __be16 max_msn; +}; + +struct cpl_srq_table_rpl { + RSS_HDR + union opcode_tid ot; + __u8 status; + __u8 rsvd[2]; + __u8 idx; + __be64 rsvd_pdid; + __be32 qlen_qbase; + __be16 cur_msn; + __be16 max_msn; +}; + +/* cpl_srq_table_{req,rpl}.params fields */ +#define S_SRQT_QLEN 28 +#define M_SRQT_QLEN 0xF +#define V_SRQT_QLEN(x) ((x) << S_SRQT_QLEN) +#define G_SRQT_QLEN(x) (((x) >> S_SRQT_QLEN) & M_SRQT_QLEN) + +#define S_SRQT_QBASE 0 +#define M_SRQT_QBASE 0x3FFFFFF +#define V_SRQT_QBASE(x) ((x) << S_SRQT_QBASE) +#define G_SRQT_QBASE(x) (((x) >> S_SRQT_QBASE) & 
M_SRQT_QBASE) + +#define S_SRQT_PDID 0 +#define M_SRQT_PDID 0xFF +#define V_SRQT_PDID(x) ((x) << S_SRQT_PDID) +#define G_SRQT_PDID(x) (((x) >> S_SRQT_PDID) & M_SRQT_PDID) + +#define S_SRQT_IDX 0 +#define M_SRQT_IDX 0xF +#define V_SRQT_IDX(x) ((x) << S_SRQT_IDX) +#define G_SRQT_IDX(x) (((x) >> S_SRQT_IDX) & M_SRQT_IDX) + struct cpl_smt_write_req { WR_HDR; union opcode_tid ot; @@ -2006,6 +2313,17 @@ struct cpl_smt_write_req { __u8 src_mac0[6]; }; +struct cpl_t6_smt_write_req { + WR_HDR; + union opcode_tid ot; + __be32 params; + __be64 tag; + __be16 pfvf0; + __u8 src_mac0[6]; + __be32 local_ip; + __be32 rsvd; +}; + struct cpl_smt_write_rpl { RSS_HDR union opcode_tid ot; @@ -2042,6 +2360,9 @@ struct cpl_smt_read_rpl { #define V_SMTW_IDX(x) ((x) << S_SMTW_IDX) #define G_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_SMTW_IDX) +#define M_T6_SMTW_IDX 0xFF +#define G_T6_SMTW_IDX(x) (((x) >> S_SMTW_IDX) & M_T6_SMTW_IDX) + #define S_SMTW_NORPL 31 #define V_SMTW_NORPL(x) ((x) << S_SMTW_NORPL) #define F_SMTW_NORPL V_SMTW_NORPL(1U) @@ -2272,6 +2593,11 @@ struct cpl_sge_egr_update { }; /* cpl_sge_egr_update.ot fields */ +#define S_AUTOEQU 22 +#define M_AUTOEQU 0x1 +#define V_AUTOEQU(x) ((x) << S_AUTOEQU) +#define G_AUTOEQU(x) (((x) >> S_AUTOEQU) & M_AUTOEQU) + #define S_EGR_QID 0 #define M_EGR_QID 0x1FFFF #define V_EGR_QID(x) ((x) << S_EGR_QID) @@ -2284,6 +2610,8 @@ enum { FW_TYPE_CQE = 2, FW_TYPE_OFLD_CONNECTION_WR_RPL = 3, FW_TYPE_RSSCPL = 4, + FW_TYPE_WRERR_RPL = 5, + FW_TYPE_PI_ERR = 6, }; struct cpl_fw2_pld { @@ -2359,7 +2687,8 @@ enum { FW6_TYPE_CQE = FW_TYPE_CQE, FW6_TYPE_OFLD_CONNECTION_WR_RPL = FW_TYPE_OFLD_CONNECTION_WR_RPL, FW6_TYPE_RSSCPL = FW_TYPE_RSSCPL, - + FW6_TYPE_WRERR_RPL = FW_TYPE_WRERR_RPL, + FW6_TYPE_PI_ERR = FW_TYPE_PI_ERR, NUM_FW6_TYPES }; @@ -2382,7 +2711,8 @@ enum { ULP_TX_SC_NOOP = 0x80, ULP_TX_SC_IMM = 0x81, ULP_TX_SC_DSGL = 0x82, - ULP_TX_SC_ISGL = 0x83 + ULP_TX_SC_ISGL = 0x83, + ULP_TX_SC_PICTRL = 0x84 }; #define S_ULPTX_CMD 24 @@ -2455,6 +2785,10 @@ struct 
ulp_mem_io { #define V_T5_ULP_MEMIO_ORDER(x) ((x) << S_T5_ULP_MEMIO_ORDER) #define F_T5_ULP_MEMIO_ORDER V_T5_ULP_MEMIO_ORDER(1U) +#define S_T5_ULP_MEMIO_FID 4 +#define M_T5_ULP_MEMIO_FID 0x7ff +#define V_T5_ULP_MEMIO_FID(x) ((x) << S_T5_ULP_MEMIO_FID) + /* ulp_mem_io.lock_addr fields */ #define S_ULP_MEMIO_ADDR 0 #define M_ULP_MEMIO_ADDR 0x7FFFFFF @@ -2495,4 +2829,219 @@ struct ulp_txpkt { #define V_ULP_TXPKT_RO(x) ((x) << S_ULP_TXPKT_RO) #define F_ULP_TXPKT_RO V_ULP_TXPKT_RO(1U) +enum cpl_tx_tnl_lso_type { + TX_TNL_TYPE_OPAQUE, + TX_TNL_TYPE_NVGRE, + TX_TNL_TYPE_VXLAN, + TX_TNL_TYPE_GENEVE, +}; + +struct cpl_tx_tnl_lso { + __be32 op_to_IpIdSplitOut; + __be16 IpIdOffsetOut; + __be16 UdpLenSetOut_to_TnlHdrLen; + __be64 r1; + __be32 Flow_to_TcpHdrLen; + __be16 IpIdOffset; + __be16 IpIdSplit_to_Mss; + __be32 TCPSeqOffset; + __be32 EthLenOffset_Size; + /* encapsulated CPL (TX_PKT_XT) follows here */ +}; + +#define S_CPL_TX_TNL_LSO_OPCODE 24 +#define M_CPL_TX_TNL_LSO_OPCODE 0xff +#define V_CPL_TX_TNL_LSO_OPCODE(x) ((x) << S_CPL_TX_TNL_LSO_OPCODE) +#define G_CPL_TX_TNL_LSO_OPCODE(x) \ + (((x) >> S_CPL_TX_TNL_LSO_OPCODE) & M_CPL_TX_TNL_LSO_OPCODE) + +#define S_CPL_TX_TNL_LSO_FIRST 23 +#define M_CPL_TX_TNL_LSO_FIRST 0x1 +#define V_CPL_TX_TNL_LSO_FIRST(x) ((x) << S_CPL_TX_TNL_LSO_FIRST) +#define G_CPL_TX_TNL_LSO_FIRST(x) \ + (((x) >> S_CPL_TX_TNL_LSO_FIRST) & M_CPL_TX_TNL_LSO_FIRST) +#define F_CPL_TX_TNL_LSO_FIRST V_CPL_TX_TNL_LSO_FIRST(1U) + +#define S_CPL_TX_TNL_LSO_LAST 22 +#define M_CPL_TX_TNL_LSO_LAST 0x1 +#define V_CPL_TX_TNL_LSO_LAST(x) ((x) << S_CPL_TX_TNL_LSO_LAST) +#define G_CPL_TX_TNL_LSO_LAST(x) \ + (((x) >> S_CPL_TX_TNL_LSO_LAST) & M_CPL_TX_TNL_LSO_LAST) +#define F_CPL_TX_TNL_LSO_LAST V_CPL_TX_TNL_LSO_LAST(1U) + +#define S_CPL_TX_TNL_LSO_ETHHDRLENXOUT 21 +#define M_CPL_TX_TNL_LSO_ETHHDRLENXOUT 0x1 +#define V_CPL_TX_TNL_LSO_ETHHDRLENXOUT(x) \ + ((x) << S_CPL_TX_TNL_LSO_ETHHDRLENXOUT) +#define G_CPL_TX_TNL_LSO_ETHHDRLENXOUT(x) \ + (((x) >> 
S_CPL_TX_TNL_LSO_ETHHDRLENXOUT) & M_CPL_TX_TNL_LSO_ETHHDRLENXOUT) +#define F_CPL_TX_TNL_LSO_ETHHDRLENXOUT V_CPL_TX_TNL_LSO_ETHHDRLENXOUT(1U) + +#define S_CPL_TX_TNL_LSO_IPV6OUT 20 +#define M_CPL_TX_TNL_LSO_IPV6OUT 0x1 +#define V_CPL_TX_TNL_LSO_IPV6OUT(x) ((x) << S_CPL_TX_TNL_LSO_IPV6OUT) +#define G_CPL_TX_TNL_LSO_IPV6OUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPV6OUT) & M_CPL_TX_TNL_LSO_IPV6OUT) +#define F_CPL_TX_TNL_LSO_IPV6OUT V_CPL_TX_TNL_LSO_IPV6OUT(1U) + +#define S_CPL_TX_TNL_LSO_ETHHDRLENOUT 16 +#define M_CPL_TX_TNL_LSO_ETHHDRLENOUT 0xf +#define V_CPL_TX_TNL_LSO_ETHHDRLENOUT(x) \ + ((x) << S_CPL_TX_TNL_LSO_ETHHDRLENOUT) +#define G_CPL_TX_TNL_LSO_ETHHDRLENOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLENOUT) & M_CPL_TX_TNL_LSO_ETHHDRLENOUT) + +#define S_CPL_TX_TNL_LSO_IPHDRLENOUT 4 +#define M_CPL_TX_TNL_LSO_IPHDRLENOUT 0xfff +#define V_CPL_TX_TNL_LSO_IPHDRLENOUT(x) ((x) << S_CPL_TX_TNL_LSO_IPHDRLENOUT) +#define G_CPL_TX_TNL_LSO_IPHDRLENOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPHDRLENOUT) & M_CPL_TX_TNL_LSO_IPHDRLENOUT) + +#define S_CPL_TX_TNL_LSO_IPHDRCHKOUT 3 +#define M_CPL_TX_TNL_LSO_IPHDRCHKOUT 0x1 +#define V_CPL_TX_TNL_LSO_IPHDRCHKOUT(x) ((x) << S_CPL_TX_TNL_LSO_IPHDRCHKOUT) +#define G_CPL_TX_TNL_LSO_IPHDRCHKOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPHDRCHKOUT) & M_CPL_TX_TNL_LSO_IPHDRCHKOUT) +#define F_CPL_TX_TNL_LSO_IPHDRCHKOUT V_CPL_TX_TNL_LSO_IPHDRCHKOUT(1U) + +#define S_CPL_TX_TNL_LSO_IPLENSETOUT 2 +#define M_CPL_TX_TNL_LSO_IPLENSETOUT 0x1 +#define V_CPL_TX_TNL_LSO_IPLENSETOUT(x) ((x) << S_CPL_TX_TNL_LSO_IPLENSETOUT) +#define G_CPL_TX_TNL_LSO_IPLENSETOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPLENSETOUT) & M_CPL_TX_TNL_LSO_IPLENSETOUT) +#define F_CPL_TX_TNL_LSO_IPLENSETOUT V_CPL_TX_TNL_LSO_IPLENSETOUT(1U) + +#define S_CPL_TX_TNL_LSO_IPIDINCOUT 1 +#define M_CPL_TX_TNL_LSO_IPIDINCOUT 0x1 +#define V_CPL_TX_TNL_LSO_IPIDINCOUT(x) ((x) << S_CPL_TX_TNL_LSO_IPIDINCOUT) +#define G_CPL_TX_TNL_LSO_IPIDINCOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPIDINCOUT) & M_CPL_TX_TNL_LSO_IPIDINCOUT) 
+#define F_CPL_TX_TNL_LSO_IPIDINCOUT V_CPL_TX_TNL_LSO_IPIDINCOUT(1U) + +#define S_CPL_TX_TNL_LSO_IPIDSPLITOUT 0 +#define M_CPL_TX_TNL_LSO_IPIDSPLITOUT 0x1 +#define V_CPL_TX_TNL_LSO_IPIDSPLITOUT(x) \ + ((x) << S_CPL_TX_TNL_LSO_IPIDSPLITOUT) +#define G_CPL_TX_TNL_LSO_IPIDSPLITOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPIDSPLITOUT) & M_CPL_TX_TNL_LSO_IPIDSPLITOUT) +#define F_CPL_TX_TNL_LSO_IPIDSPLITOUT V_CPL_TX_TNL_LSO_IPIDSPLITOUT(1U) + +#define S_CPL_TX_TNL_LSO_UDPLENSETOUT 15 +#define M_CPL_TX_TNL_LSO_UDPLENSETOUT 0x1 +#define V_CPL_TX_TNL_LSO_UDPLENSETOUT(x) \ + ((x) << S_CPL_TX_TNL_LSO_UDPLENSETOUT) +#define G_CPL_TX_TNL_LSO_UDPLENSETOUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_UDPLENSETOUT) & M_CPL_TX_TNL_LSO_UDPLENSETOUT) +#define F_CPL_TX_TNL_LSO_UDPLENSETOUT V_CPL_TX_TNL_LSO_UDPLENSETOUT(1U) + +#define S_CPL_TX_TNL_LSO_UDPCHKCLROUT 14 +#define M_CPL_TX_TNL_LSO_UDPCHKCLROUT 0x1 +#define V_CPL_TX_TNL_LSO_UDPCHKCLROUT(x) \ + ((x) << S_CPL_TX_TNL_LSO_UDPCHKCLROUT) +#define G_CPL_TX_TNL_LSO_UDPCHKCLROUT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_UDPCHKCLROUT) & M_CPL_TX_TNL_LSO_UDPCHKCLROUT) +#define F_CPL_TX_TNL_LSO_UDPCHKCLROUT V_CPL_TX_TNL_LSO_UDPCHKCLROUT(1U) + +#define S_CPL_TX_TNL_LSO_TNLTYPE 12 +#define M_CPL_TX_TNL_LSO_TNLTYPE 0x3 +#define V_CPL_TX_TNL_LSO_TNLTYPE(x) ((x) << S_CPL_TX_TNL_LSO_TNLTYPE) +#define G_CPL_TX_TNL_LSO_TNLTYPE(x) \ + (((x) >> S_CPL_TX_TNL_LSO_TNLTYPE) & M_CPL_TX_TNL_LSO_TNLTYPE) + +#define S_CPL_TX_TNL_LSO_TNLHDRLEN 0 +#define M_CPL_TX_TNL_LSO_TNLHDRLEN 0xfff +#define V_CPL_TX_TNL_LSO_TNLHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_TNLHDRLEN) +#define G_CPL_TX_TNL_LSO_TNLHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_TNLHDRLEN) & M_CPL_TX_TNL_LSO_TNLHDRLEN) + +#define S_CPL_TX_TNL_LSO_FLOW 21 +#define M_CPL_TX_TNL_LSO_FLOW 0x1 +#define V_CPL_TX_TNL_LSO_FLOW(x) ((x) << S_CPL_TX_TNL_LSO_FLOW) +#define G_CPL_TX_TNL_LSO_FLOW(x) \ + (((x) >> S_CPL_TX_TNL_LSO_FLOW) & M_CPL_TX_TNL_LSO_FLOW) +#define F_CPL_TX_TNL_LSO_FLOW V_CPL_TX_TNL_LSO_FLOW(1U) + +#define S_CPL_TX_TNL_LSO_IPV6 
20 +#define M_CPL_TX_TNL_LSO_IPV6 0x1 +#define V_CPL_TX_TNL_LSO_IPV6(x) ((x) << S_CPL_TX_TNL_LSO_IPV6) +#define G_CPL_TX_TNL_LSO_IPV6(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPV6) & M_CPL_TX_TNL_LSO_IPV6) +#define F_CPL_TX_TNL_LSO_IPV6 V_CPL_TX_TNL_LSO_IPV6(1U) + +#define S_CPL_TX_TNL_LSO_ETHHDRLEN 16 +#define M_CPL_TX_TNL_LSO_ETHHDRLEN 0xf +#define V_CPL_TX_TNL_LSO_ETHHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLEN) +#define G_CPL_TX_TNL_LSO_ETHHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLEN) & M_CPL_TX_TNL_LSO_ETHHDRLEN) + +#define S_CPL_TX_TNL_LSO_IPHDRLEN 4 +#define M_CPL_TX_TNL_LSO_IPHDRLEN 0xfff +#define V_CPL_TX_TNL_LSO_IPHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_IPHDRLEN) +#define G_CPL_TX_TNL_LSO_IPHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPHDRLEN) & M_CPL_TX_TNL_LSO_IPHDRLEN) + +#define S_CPL_TX_TNL_LSO_TCPHDRLEN 0 +#define M_CPL_TX_TNL_LSO_TCPHDRLEN 0xf +#define V_CPL_TX_TNL_LSO_TCPHDRLEN(x) ((x) << S_CPL_TX_TNL_LSO_TCPHDRLEN) +#define G_CPL_TX_TNL_LSO_TCPHDRLEN(x) \ + (((x) >> S_CPL_TX_TNL_LSO_TCPHDRLEN) & M_CPL_TX_TNL_LSO_TCPHDRLEN) + +#define S_CPL_TX_TNL_LSO_IPIDSPLIT 15 +#define M_CPL_TX_TNL_LSO_IPIDSPLIT 0x1 +#define V_CPL_TX_TNL_LSO_IPIDSPLIT(x) ((x) << S_CPL_TX_TNL_LSO_IPIDSPLIT) +#define G_CPL_TX_TNL_LSO_IPIDSPLIT(x) \ + (((x) >> S_CPL_TX_TNL_LSO_IPIDSPLIT) & M_CPL_TX_TNL_LSO_IPIDSPLIT) +#define F_CPL_TX_TNL_LSO_IPIDSPLIT V_CPL_TX_TNL_LSO_IPIDSPLIT(1U) + +#define S_CPL_TX_TNL_LSO_ETHHDRLENX 14 +#define M_CPL_TX_TNL_LSO_ETHHDRLENX 0x1 +#define V_CPL_TX_TNL_LSO_ETHHDRLENX(x) ((x) << S_CPL_TX_TNL_LSO_ETHHDRLENX) +#define G_CPL_TX_TNL_LSO_ETHHDRLENX(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHHDRLENX) & M_CPL_TX_TNL_LSO_ETHHDRLENX) +#define F_CPL_TX_TNL_LSO_ETHHDRLENX V_CPL_TX_TNL_LSO_ETHHDRLENX(1U) + +#define S_CPL_TX_TNL_LSO_MSS 0 +#define M_CPL_TX_TNL_LSO_MSS 0x3fff +#define V_CPL_TX_TNL_LSO_MSS(x) ((x) << S_CPL_TX_TNL_LSO_MSS) +#define G_CPL_TX_TNL_LSO_MSS(x) \ + (((x) >> S_CPL_TX_TNL_LSO_MSS) & M_CPL_TX_TNL_LSO_MSS) + +#define S_CPL_TX_TNL_LSO_ETHLENOFFSET 28 +#define 
M_CPL_TX_TNL_LSO_ETHLENOFFSET 0xf +#define V_CPL_TX_TNL_LSO_ETHLENOFFSET(x) \ + ((x) << S_CPL_TX_TNL_LSO_ETHLENOFFSET) +#define G_CPL_TX_TNL_LSO_ETHLENOFFSET(x) \ + (((x) >> S_CPL_TX_TNL_LSO_ETHLENOFFSET) & M_CPL_TX_TNL_LSO_ETHLENOFFSET) + +#define S_CPL_TX_TNL_LSO_SIZE 0 +#define M_CPL_TX_TNL_LSO_SIZE 0xfffffff +#define V_CPL_TX_TNL_LSO_SIZE(x) ((x) << S_CPL_TX_TNL_LSO_SIZE) +#define G_CPL_TX_TNL_LSO_SIZE(x) \ + (((x) >> S_CPL_TX_TNL_LSO_SIZE) & M_CPL_TX_TNL_LSO_SIZE) + +struct cpl_rx_mps_pkt { + __be32 op_to_r1_hi; + __be32 r1_lo_length; +}; + +#define S_CPL_RX_MPS_PKT_OP 24 +#define M_CPL_RX_MPS_PKT_OP 0xff +#define V_CPL_RX_MPS_PKT_OP(x) ((x) << S_CPL_RX_MPS_PKT_OP) +#define G_CPL_RX_MPS_PKT_OP(x) \ + (((x) >> S_CPL_RX_MPS_PKT_OP) & M_CPL_RX_MPS_PKT_OP) + +#define S_CPL_RX_MPS_PKT_TYPE 20 +#define M_CPL_RX_MPS_PKT_TYPE 0xf +#define V_CPL_RX_MPS_PKT_TYPE(x) ((x) << S_CPL_RX_MPS_PKT_TYPE) +#define G_CPL_RX_MPS_PKT_TYPE(x) \ + (((x) >> S_CPL_RX_MPS_PKT_TYPE) & M_CPL_RX_MPS_PKT_TYPE) + +/* + * Values for CPL_RX_MPS_PKT_TYPE, a bit-wise orthogonal field. + */ +#define X_CPL_RX_MPS_PKT_TYPE_PAUSE (1 << 0) +#define X_CPL_RX_MPS_PKT_TYPE_PPP (1 << 1) +#define X_CPL_RX_MPS_PKT_TYPE_QFC (1 << 2) +#define X_CPL_RX_MPS_PKT_TYPE_PTP (1 << 3) + #endif /* T4_MSG_H */ diff --git a/sys/dev/cxgbe/common/t4_regs.h b/sys/dev/cxgbe/common/t4_regs.h index 1ea909eb1a3e..0525424ebce2 100644 --- a/sys/dev/cxgbe/common/t4_regs.h +++ b/sys/dev/cxgbe/common/t4_regs.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2013 Chelsio Communications, Inc. + * Copyright (c) 2013, 2016 Chelsio Communications, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -28,6 +28,10 @@ */ /* This file is automatically generated --- changes will be lost */ +/* Generation Date : Wed Jan 27 10:57:51 IST 2016 */ +/* Directory name: t4_reg.txt, Changeset: */ +/* Directory name: t5_reg.txt, Changeset: 6936:7f6342b03d61 */ +/* Directory name: t6_reg.txt, Changeset: 4191:ce3ccd95c109 */ #define MYPF_BASE 0x1b000 #define MYPF_REG(reg_addr) (MYPF_BASE + (reg_addr)) @@ -368,9 +372,141 @@ #define EDC_H_ECC_ERR_DATA_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) #define NUM_EDC_H_ECC_ERR_DATA_INSTANCES 16 +#define SGE_DEBUG1_DBP_THREAD(idx) (A_SGE_DEBUG1_DBP_THREAD + (idx) * 4) +#define NUM_SGE_DEBUG1_DBP_THREAD_INSTANCES 4 + +#define SGE_DEBUG0_DBP_THREAD(idx) (A_SGE_DEBUG0_DBP_THREAD + (idx) * 4) +#define NUM_SGE_DEBUG0_DBP_THREAD_INSTANCES 5 + +#define SGE_WC_EGRS_BAR2_OFF_PF(idx) (A_SGE_WC_EGRS_BAR2_OFF_PF + (idx) * 4) +#define NUM_SGE_WC_EGRS_BAR2_OFF_PF_INSTANCES 8 + +#define SGE_WC_EGRS_BAR2_OFF_VF(idx) (A_SGE_WC_EGRS_BAR2_OFF_VF + (idx) * 4) +#define NUM_SGE_WC_EGRS_BAR2_OFF_VF_INSTANCES 8 + +#define PCIE_T6_DMA_REG(reg_addr, idx) ((reg_addr) + (idx) * 16) +#define NUM_PCIE_T6_DMA_INSTANCES 2 + +#define PCIE_T6_CMD_REG(reg_addr, idx) ((reg_addr) + (idx) * 16) +#define NUM_PCIE_T6_CMD_INSTANCES 1 + +#define PCIE_VF_256_INT_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_PCIE_VF_256_INT_INSTANCES 128 + +#define MPS_CLS_REQUEST_TRACE_MAC_DA_L(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_DA_L + (idx) * 32) +#define NUM_MPS_CLS_REQUEST_TRACE_MAC_DA_L_INSTANCES 8 + +#define MPS_CLS_REQUEST_TRACE_MAC_DA_H(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_DA_H + (idx) * 32) +#define NUM_MPS_CLS_REQUEST_TRACE_MAC_DA_H_INSTANCES 8 + +#define MPS_CLS_REQUEST_TRACE_MAC_SA_L(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_SA_L + (idx) * 32) +#define NUM_MPS_CLS_REQUEST_TRACE_MAC_SA_L_INSTANCES 8 + +#define MPS_CLS_REQUEST_TRACE_MAC_SA_H(idx) (A_MPS_CLS_REQUEST_TRACE_MAC_SA_H + (idx) * 32) +#define 
NUM_MPS_CLS_REQUEST_TRACE_MAC_SA_H_INSTANCES 8 + +#define MPS_CLS_REQUEST_TRACE_PORT_VLAN(idx) (A_MPS_CLS_REQUEST_TRACE_PORT_VLAN + (idx) * 32) +#define NUM_MPS_CLS_REQUEST_TRACE_PORT_VLAN_INSTANCES 8 + +#define MPS_CLS_REQUEST_TRACE_ENCAP(idx) (A_MPS_CLS_REQUEST_TRACE_ENCAP + (idx) * 32) +#define NUM_MPS_CLS_REQUEST_TRACE_ENCAP_INSTANCES 8 + +#define MPS_CLS_RESULT_TRACE(idx) (A_MPS_CLS_RESULT_TRACE + (idx) * 4) +#define NUM_MPS_CLS_RESULT_TRACE_INSTANCES 8 + +#define MPS_CLS_DIPIPV4_ID_TABLE(idx) (A_MPS_CLS_DIPIPV4_ID_TABLE + (idx) * 8) +#define NUM_MPS_CLS_DIPIPV4_ID_TABLE_INSTANCES 4 + +#define MPS_CLS_DIPIPV4_MASK_TABLE(idx) (A_MPS_CLS_DIPIPV4_MASK_TABLE + (idx) * 8) +#define NUM_MPS_CLS_DIPIPV4_MASK_TABLE_INSTANCES 4 + +#define MPS_CLS_DIPIPV6ID_0_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_0_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6ID_0_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6ID_1_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_1_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6ID_1_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6ID_2_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_2_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6ID_2_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6ID_3_TABLE(idx) (A_MPS_CLS_DIPIPV6ID_3_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6ID_3_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6MASK_0_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_0_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6MASK_0_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6MASK_1_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_1_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6MASK_1_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6MASK_2_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_2_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6MASK_2_TABLE_INSTANCES 2 + +#define MPS_CLS_DIPIPV6MASK_3_TABLE(idx) (A_MPS_CLS_DIPIPV6MASK_3_TABLE + (idx) * 32) +#define NUM_MPS_CLS_DIPIPV6MASK_3_TABLE_INSTANCES 2 + +#define MPS_RX_HASH_LKP_TABLE(idx) (A_MPS_RX_HASH_LKP_TABLE + (idx) * 4) +#define NUM_MPS_RX_HASH_LKP_TABLE_INSTANCES 4 + +#define 
LE_DB_DBG_MATCH_DATA_MASK(idx) (A_LE_DB_DBG_MATCH_DATA_MASK + (idx) * 4) +#define NUM_LE_DB_DBG_MATCH_DATA_MASK_INSTANCES 8 + +#define LE_DB_DBG_MATCH_DATA(idx) (A_LE_DB_DBG_MATCH_DATA + (idx) * 4) +#define NUM_LE_DB_DBG_MATCH_DATA_INSTANCES 8 + +#define LE_DB_DBGI_REQ_DATA_T6(idx) (A_LE_DB_DBGI_REQ_DATA + (idx) * 4) +#define NUM_LE_DB_DBGI_REQ_DATA_T6_INSTANCES 11 + +#define LE_DB_DBGI_REQ_MASK_T6(idx) (A_LE_DB_DBGI_REQ_MASK + (idx) * 4) +#define NUM_LE_DB_DBGI_REQ_MASK_T6_INSTANCES 11 + +#define LE_DB_DBGI_RSP_DATA_T6(idx) (A_LE_DB_DBGI_RSP_DATA + (idx) * 4) +#define NUM_LE_DB_DBGI_RSP_DATA_T6_INSTANCES 11 + +#define LE_DB_ACTIVE_MASK_IPV6_T6(idx) (A_LE_DB_ACTIVE_MASK_IPV6 + (idx) * 4) +#define NUM_LE_DB_ACTIVE_MASK_IPV6_T6_INSTANCES 8 + +#define LE_HASH_MASK_GEN_IPV4T6(idx) (A_LE_HASH_MASK_GEN_IPV4T5 + (idx) * 4) +#define NUM_LE_HASH_MASK_GEN_IPV4T6_INSTANCES 8 + +#define T6_LE_HASH_MASK_GEN_IPV6T5(idx) (A_T6_LE_HASH_MASK_GEN_IPV6T5 + (idx) * 4) +#define NUM_T6_LE_HASH_MASK_GEN_IPV6T5_INSTANCES 8 + +#define LE_DB_PSV_FILTER_MASK_TUP_IPV4(idx) (A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 + (idx) * 4) +#define NUM_LE_DB_PSV_FILTER_MASK_TUP_IPV4_INSTANCES 3 + +#define LE_DB_PSV_FILTER_MASK_FLT_IPV4(idx) (A_LE_DB_PSV_FILTER_MASK_FLT_IPV4 + (idx) * 4) +#define NUM_LE_DB_PSV_FILTER_MASK_FLT_IPV4_INSTANCES 2 + +#define LE_DB_PSV_FILTER_MASK_TUP_IPV6(idx) (A_LE_DB_PSV_FILTER_MASK_TUP_IPV6 + (idx) * 4) +#define NUM_LE_DB_PSV_FILTER_MASK_TUP_IPV6_INSTANCES 9 + +#define LE_DB_PSV_FILTER_MASK_FLT_IPV6(idx) (A_LE_DB_PSV_FILTER_MASK_FLT_IPV6 + (idx) * 4) +#define NUM_LE_DB_PSV_FILTER_MASK_FLT_IPV6_INSTANCES 2 + +#define LE_DB_SECOND_GEN_HASH_MASK_IPV4_T6(idx) (A_LE_DB_SECOND_GEN_HASH_MASK_IPV4 + (idx) * 4) +#define NUM_LE_DB_SECOND_GEN_HASH_MASK_IPV4_T6_INSTANCES 8 + +#define MC_DDRPHY_DP18_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 512) +#define NUM_MC_DDRPHY_DP18_T6_INSTANCES 9 + +#define MC_CE_ERR_DATA_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define 
NUM_MC_CE_ERR_DATA_T6_INSTANCES 16 + +#define MC_UE_ERR_DATA_T6_REG(reg_addr, idx) ((reg_addr) + (idx) * 4) +#define NUM_MC_UE_ERR_DATA_T6_INSTANCES 16 + +#define CIM_CTL_MAILBOX_VF_STATUS_T6(idx) (A_CIM_CTL_MAILBOX_VF_STATUS + (idx) * 4) +#define NUM_CIM_CTL_MAILBOX_VF_STATUS_T6_INSTANCES 8 + +#define CIM_CTL_MAILBOX_VFN_CTL_T6(idx) (A_CIM_CTL_MAILBOX_VFN_CTL + (idx) * 4) +#define NUM_CIM_CTL_MAILBOX_VFN_CTL_T6_INSTANCES 256 + #define EDC_STRIDE (EDC_1_BASE_ADDR - EDC_0_BASE_ADDR) #define EDC_REG(reg, idx) (reg + EDC_STRIDE * idx) +#define EDC_T5_STRIDE (EDC_T51_BASE_ADDR - EDC_T50_BASE_ADDR) +#define EDC_T5_REG(reg, idx) (reg + EDC_T5_STRIDE * idx) + /* registers for module SGE */ #define SGE_BASE_ADDR 0x1000 @@ -401,6 +537,10 @@ #define V_PIDX_T5(x) ((x) << S_PIDX_T5) #define G_PIDX_T5(x) (((x) >> S_PIDX_T5) & M_PIDX_T5) +#define S_SYNC_T6 14 +#define V_SYNC_T6(x) ((x) << S_SYNC_T6) +#define F_SYNC_T6 V_SYNC_T6(1U) + #define A_SGE_PF_GTS 0x4 #define S_INGRESSQID 16 @@ -793,6 +933,14 @@ #define V_PERR_PC_CHPI_RSP2(x) ((x) << S_PERR_PC_CHPI_RSP2) #define F_PERR_PC_CHPI_RSP2 V_PERR_PC_CHPI_RSP2(1U) +#define S_PERR_PC_RSP 23 +#define V_PERR_PC_RSP(x) ((x) << S_PERR_PC_RSP) +#define F_PERR_PC_RSP V_PERR_PC_RSP(1U) + +#define S_PERR_PC_REQ 22 +#define V_PERR_PC_REQ(x) ((x) << S_PERR_PC_REQ) +#define F_PERR_PC_REQ V_PERR_PC_REQ(1U) + #define A_SGE_INT_ENABLE1 0x1028 #define A_SGE_PERR_ENABLE1 0x102c #define A_SGE_INT_CAUSE2 0x1030 @@ -937,6 +1085,26 @@ #define V_PERR_PC_DBP2(x) ((x) << S_PERR_PC_DBP2) #define F_PERR_PC_DBP2 V_PERR_PC_DBP2(1U) +#define S_DEQ_LL_PERR 21 +#define V_DEQ_LL_PERR(x) ((x) << S_DEQ_LL_PERR) +#define F_DEQ_LL_PERR V_DEQ_LL_PERR(1U) + +#define S_ENQ_PERR 20 +#define V_ENQ_PERR(x) ((x) << S_ENQ_PERR) +#define F_ENQ_PERR V_ENQ_PERR(1U) + +#define S_DEQ_OUT_PERR 19 +#define V_DEQ_OUT_PERR(x) ((x) << S_DEQ_OUT_PERR) +#define F_DEQ_OUT_PERR V_DEQ_OUT_PERR(1U) + +#define S_BUF_PERR 18 +#define V_BUF_PERR(x) ((x) << S_BUF_PERR) +#define F_BUF_PERR 
V_BUF_PERR(1U) + +#define S_PERR_DB_FIFO 3 +#define V_PERR_DB_FIFO(x) ((x) << S_PERR_DB_FIFO) +#define F_PERR_DB_FIFO V_PERR_DB_FIFO(1U) + #define A_SGE_INT_ENABLE2 0x1034 #define A_SGE_PERR_ENABLE2 0x1038 #define A_SGE_INT_CAUSE3 0x103c @@ -1069,6 +1237,14 @@ #define V_ERR_INV_CTXT0(x) ((x) << S_ERR_INV_CTXT0) #define F_ERR_INV_CTXT0 V_ERR_INV_CTXT0(1U) +#define S_DBP_TBUF_FULL 8 +#define V_DBP_TBUF_FULL(x) ((x) << S_DBP_TBUF_FULL) +#define F_DBP_TBUF_FULL V_DBP_TBUF_FULL(1U) + +#define S_FATAL_WRE_LEN 7 +#define V_FATAL_WRE_LEN(x) ((x) << S_FATAL_WRE_LEN) +#define F_FATAL_WRE_LEN V_FATAL_WRE_LEN(1U) + #define A_SGE_INT_ENABLE3 0x1040 #define A_SGE_FL_BUFFER_SIZE0 0x1044 @@ -1077,21 +1253,116 @@ #define V_SIZE(x) ((x) << S_SIZE) #define G_SIZE(x) (((x) >> S_SIZE) & CXGBE_M_SIZE) +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE1 0x1048 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE2 0x104c + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE3 0x1050 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE4 0x1054 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE5 0x1058 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE6 0x105c + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) 
+#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE7 0x1060 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE8 0x1064 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE9 0x1068 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE10 0x106c + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE11 0x1070 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE12 0x1074 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE13 0x1078 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE14 0x107c + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_FL_BUFFER_SIZE15 0x1080 + +#define S_T6_SIZE 4 +#define M_T6_SIZE 0xfffffU +#define V_T6_SIZE(x) ((x) << S_T6_SIZE) +#define G_T6_SIZE(x) (((x) >> S_T6_SIZE) & M_T6_SIZE) + #define A_SGE_DBQ_CTXT_BADDR 0x1084 #define S_BASEADDR 3 @@ -1146,6 +1417,15 @@ #define V_CREDITCNTPACKING(x) ((x) << S_CREDITCNTPACKING) #define G_CREDITCNTPACKING(x) (((x) >> S_CREDITCNTPACKING) & M_CREDITCNTPACKING) +#define S_NULLPTR 20 +#define M_NULLPTR 
0xfU +#define V_NULLPTR(x) ((x) << S_NULLPTR) +#define G_NULLPTR(x) (((x) >> S_NULLPTR) & M_NULLPTR) + +#define S_NULLPTREN 19 +#define V_NULLPTREN(x) ((x) << S_NULLPTREN) +#define F_NULLPTREN V_NULLPTREN(1U) + #define A_SGE_CONM_CTRL 0x1094 #define S_EGRTHRESHOLD 8 @@ -1171,6 +1451,16 @@ #define V_EGRTHRESHOLDPACKING(x) ((x) << S_EGRTHRESHOLDPACKING) #define G_EGRTHRESHOLDPACKING(x) (((x) >> S_EGRTHRESHOLDPACKING) & M_EGRTHRESHOLDPACKING) +#define S_T6_EGRTHRESHOLDPACKING 16 +#define M_T6_EGRTHRESHOLDPACKING 0xffU +#define V_T6_EGRTHRESHOLDPACKING(x) ((x) << S_T6_EGRTHRESHOLDPACKING) +#define G_T6_EGRTHRESHOLDPACKING(x) (((x) >> S_T6_EGRTHRESHOLDPACKING) & M_T6_EGRTHRESHOLDPACKING) + +#define S_T6_EGRTHRESHOLD 8 +#define M_T6_EGRTHRESHOLD 0xffU +#define V_T6_EGRTHRESHOLD(x) ((x) << S_T6_EGRTHRESHOLD) +#define G_T6_EGRTHRESHOLD(x) (((x) >> S_T6_EGRTHRESHOLD) & M_T6_EGRTHRESHOLD) + #define A_SGE_TIMESTAMP_LO 0x1098 #define A_SGE_TIMESTAMP_HI 0x109c @@ -1246,6 +1536,21 @@ #define V_LP_COUNT_T5(x) ((x) << S_LP_COUNT_T5) #define G_LP_COUNT_T5(x) (((x) >> S_LP_COUNT_T5) & M_LP_COUNT_T5) +#define S_VFIFO_CNT 15 +#define M_VFIFO_CNT 0x1ffffU +#define V_VFIFO_CNT(x) ((x) << S_VFIFO_CNT) +#define G_VFIFO_CNT(x) (((x) >> S_VFIFO_CNT) & M_VFIFO_CNT) + +#define S_COAL_CTL_FIFO_CNT 8 +#define M_COAL_CTL_FIFO_CNT 0x3fU +#define V_COAL_CTL_FIFO_CNT(x) ((x) << S_COAL_CTL_FIFO_CNT) +#define G_COAL_CTL_FIFO_CNT(x) (((x) >> S_COAL_CTL_FIFO_CNT) & M_COAL_CTL_FIFO_CNT) + +#define S_MERGE_FIFO_CNT 0 +#define M_MERGE_FIFO_CNT 0x3fU +#define V_MERGE_FIFO_CNT(x) ((x) << S_MERGE_FIFO_CNT) +#define G_MERGE_FIFO_CNT(x) (((x) >> S_MERGE_FIFO_CNT) & M_MERGE_FIFO_CNT) + #define A_SGE_DOORBELL_CONTROL 0x10a8 #define S_HINTDEPTHCTL 27 @@ -1315,6 +1620,32 @@ #define V_DROPPED_DB(x) ((x) << S_DROPPED_DB) #define F_DROPPED_DB V_DROPPED_DB(1U) +#define S_T6_DROP_TIMEOUT 7 +#define M_T6_DROP_TIMEOUT 0x3fU +#define V_T6_DROP_TIMEOUT(x) ((x) << S_T6_DROP_TIMEOUT) +#define G_T6_DROP_TIMEOUT(x) (((x) >> 
S_T6_DROP_TIMEOUT) & M_T6_DROP_TIMEOUT) + +#define S_INVONDBSYNC 6 +#define V_INVONDBSYNC(x) ((x) << S_INVONDBSYNC) +#define F_INVONDBSYNC V_INVONDBSYNC(1U) + +#define S_INVONGTSSYNC 5 +#define V_INVONGTSSYNC(x) ((x) << S_INVONGTSSYNC) +#define F_INVONGTSSYNC V_INVONGTSSYNC(1U) + +#define S_DB_DBG_EN 4 +#define V_DB_DBG_EN(x) ((x) << S_DB_DBG_EN) +#define F_DB_DBG_EN V_DB_DBG_EN(1U) + +#define S_GTS_DBG_TIMER_REG 1 +#define M_GTS_DBG_TIMER_REG 0x7U +#define V_GTS_DBG_TIMER_REG(x) ((x) << S_GTS_DBG_TIMER_REG) +#define G_GTS_DBG_TIMER_REG(x) (((x) >> S_GTS_DBG_TIMER_REG) & M_GTS_DBG_TIMER_REG) + +#define S_GTS_DBG_EN 0 +#define V_GTS_DBG_EN(x) ((x) << S_GTS_DBG_EN) +#define F_GTS_DBG_EN V_GTS_DBG_EN(1U) + #define A_SGE_DROPPED_DOORBELL 0x10ac #define A_SGE_DOORBELL_THROTTLE_CONTROL 0x10b0 @@ -1360,6 +1691,11 @@ #define V_LL_READ_WAIT_DISABLE(x) ((x) << S_LL_READ_WAIT_DISABLE) #define F_LL_READ_WAIT_DISABLE V_LL_READ_WAIT_DISABLE(1U) +#define S_TSCALE 28 +#define M_TSCALE 0xfU +#define V_TSCALE(x) ((x) << S_TSCALE) +#define G_TSCALE(x) (((x) >> S_TSCALE) & M_TSCALE) + #define A_SGE_TIMER_VALUE_0_AND_1 0x10b8 #define S_TIMERVALUE0 16 @@ -1426,6 +1762,39 @@ #define V_MAXRSPCNT1(x) ((x) << S_MAXRSPCNT1) #define G_MAXRSPCNT1(x) (((x) >> S_MAXRSPCNT1) & M_MAXRSPCNT1) +#define A_SGE_GK_CONTROL 0x10c4 + +#define S_EN_FLM_FIFTH 29 +#define V_EN_FLM_FIFTH(x) ((x) << S_EN_FLM_FIFTH) +#define F_EN_FLM_FIFTH V_EN_FLM_FIFTH(1U) + +#define S_FL_PROG_THRESH 20 +#define M_FL_PROG_THRESH 0x1ffU +#define V_FL_PROG_THRESH(x) ((x) << S_FL_PROG_THRESH) +#define G_FL_PROG_THRESH(x) (((x) >> S_FL_PROG_THRESH) & M_FL_PROG_THRESH) + +#define S_COAL_ALL_THREAD 19 +#define V_COAL_ALL_THREAD(x) ((x) << S_COAL_ALL_THREAD) +#define F_COAL_ALL_THREAD V_COAL_ALL_THREAD(1U) + +#define S_EN_PSHB 18 +#define V_EN_PSHB(x) ((x) << S_EN_PSHB) +#define F_EN_PSHB V_EN_PSHB(1U) + +#define S_EN_DB_FIFTH 17 +#define V_EN_DB_FIFTH(x) ((x) << S_EN_DB_FIFTH) +#define F_EN_DB_FIFTH V_EN_DB_FIFTH(1U) + +#define 
S_DB_PROG_THRESH 8 +#define M_DB_PROG_THRESH 0x1ffU +#define V_DB_PROG_THRESH(x) ((x) << S_DB_PROG_THRESH) +#define G_DB_PROG_THRESH(x) (((x) >> S_DB_PROG_THRESH) & M_DB_PROG_THRESH) + +#define S_100NS_TIMER 0 +#define M_100NS_TIMER 0xffU +#define V_100NS_TIMER(x) ((x) << S_100NS_TIMER) +#define G_100NS_TIMER(x) (((x) >> S_100NS_TIMER) & M_100NS_TIMER) + #define A_SGE_PD_RSP_CREDIT23 0x10c8 #define S_RSPCREDITEN2 31 @@ -1456,6 +1825,23 @@ #define V_MAXRSPCNT3(x) ((x) << S_MAXRSPCNT3) #define G_MAXRSPCNT3(x) (((x) >> S_MAXRSPCNT3) & M_MAXRSPCNT3) +#define A_SGE_GK_CONTROL2 0x10c8 + +#define S_DBQ_TIMER_TICK 16 +#define M_DBQ_TIMER_TICK 0xffffU +#define V_DBQ_TIMER_TICK(x) ((x) << S_DBQ_TIMER_TICK) +#define G_DBQ_TIMER_TICK(x) (((x) >> S_DBQ_TIMER_TICK) & M_DBQ_TIMER_TICK) + +#define S_FL_MERGE_CNT_THRESH 8 +#define M_FL_MERGE_CNT_THRESH 0xfU +#define V_FL_MERGE_CNT_THRESH(x) ((x) << S_FL_MERGE_CNT_THRESH) +#define G_FL_MERGE_CNT_THRESH(x) (((x) >> S_FL_MERGE_CNT_THRESH) & M_FL_MERGE_CNT_THRESH) + +#define S_MERGE_CNT_THRESH 0 +#define M_MERGE_CNT_THRESH 0x3fU +#define V_MERGE_CNT_THRESH(x) ((x) << S_MERGE_CNT_THRESH) +#define G_MERGE_CNT_THRESH(x) (((x) >> S_MERGE_CNT_THRESH) & M_MERGE_CNT_THRESH) + #define A_SGE_DEBUG_INDEX 0x10cc #define A_SGE_DEBUG_DATA_HIGH 0x10d0 #define A_SGE_DEBUG_DATA_LOW 0x10d4 @@ -1582,6 +1968,30 @@ #define V_ERR_RX_CPL_PACKET_SIZE0(x) ((x) << S_ERR_RX_CPL_PACKET_SIZE0) #define F_ERR_RX_CPL_PACKET_SIZE0 V_ERR_RX_CPL_PACKET_SIZE0(1U) +#define S_ERR_ISHIFT_UR1 31 +#define V_ERR_ISHIFT_UR1(x) ((x) << S_ERR_ISHIFT_UR1) +#define F_ERR_ISHIFT_UR1 V_ERR_ISHIFT_UR1(1U) + +#define S_ERR_ISHIFT_UR0 30 +#define V_ERR_ISHIFT_UR0(x) ((x) << S_ERR_ISHIFT_UR0) +#define F_ERR_ISHIFT_UR0 V_ERR_ISHIFT_UR0(1U) + +#define S_ERR_TH3_MAX_FETCH 14 +#define V_ERR_TH3_MAX_FETCH(x) ((x) << S_ERR_TH3_MAX_FETCH) +#define F_ERR_TH3_MAX_FETCH V_ERR_TH3_MAX_FETCH(1U) + +#define S_ERR_TH2_MAX_FETCH 13 +#define V_ERR_TH2_MAX_FETCH(x) ((x) << S_ERR_TH2_MAX_FETCH) +#define 
F_ERR_TH2_MAX_FETCH V_ERR_TH2_MAX_FETCH(1U) + +#define S_ERR_TH1_MAX_FETCH 12 +#define V_ERR_TH1_MAX_FETCH(x) ((x) << S_ERR_TH1_MAX_FETCH) +#define F_ERR_TH1_MAX_FETCH V_ERR_TH1_MAX_FETCH(1U) + +#define S_ERR_TH0_MAX_FETCH 11 +#define V_ERR_TH0_MAX_FETCH(x) ((x) << S_ERR_TH0_MAX_FETCH) +#define F_ERR_TH0_MAX_FETCH V_ERR_TH0_MAX_FETCH(1U) + #define A_SGE_INT_ENABLE4 0x10e0 #define A_SGE_STAT_TOTAL 0x10e4 #define A_SGE_STAT_MATCH 0x10e8 @@ -1616,6 +2026,11 @@ #define V_STATSOURCE_T5(x) ((x) << S_STATSOURCE_T5) #define G_STATSOURCE_T5(x) (((x) >> S_STATSOURCE_T5) & M_STATSOURCE_T5) +#define S_T6_STATMODE 0 +#define M_T6_STATMODE 0xfU +#define V_T6_STATMODE(x) ((x) << S_T6_STATMODE) +#define G_T6_STATMODE(x) (((x) >> S_T6_STATMODE) & M_T6_STATMODE) + #define A_SGE_HINT_CFG 0x10f0 #define S_HINTSALLOWEDNOHDR 6 @@ -1689,6 +2104,7 @@ #define V_MINTAG0(x) ((x) << S_MINTAG0) #define G_MINTAG0(x) (((x) >> S_MINTAG0) & M_MINTAG0) +#define A_SGE_IDMA0_DROP_CNT 0x1104 #define A_SGE_SHARED_TAG_POOL_CFG 0x1108 #define S_TAGPOOLTOTAL 0 @@ -1696,6 +2112,7 @@ #define V_TAGPOOLTOTAL(x) ((x) << S_TAGPOOLTOTAL) #define G_TAGPOOLTOTAL(x) (((x) >> S_TAGPOOLTOTAL) & M_TAGPOOLTOTAL) +#define A_SGE_IDMA1_DROP_CNT 0x1108 #define A_SGE_INT_CAUSE5 0x110c #define S_ERR_T_RXCRC 31 @@ -1992,6 +2409,90 @@ #define V_EDMA0_SLEEP_REQ(x) ((x) << S_EDMA0_SLEEP_REQ) #define F_EDMA0_SLEEP_REQ V_EDMA0_SLEEP_REQ(1U) +#define A_SGE_INT_CAUSE6 0x1128 + +#define S_ERR_DB_SYNC 21 +#define V_ERR_DB_SYNC(x) ((x) << S_ERR_DB_SYNC) +#define F_ERR_DB_SYNC V_ERR_DB_SYNC(1U) + +#define S_ERR_GTS_SYNC 20 +#define V_ERR_GTS_SYNC(x) ((x) << S_ERR_GTS_SYNC) +#define F_ERR_GTS_SYNC V_ERR_GTS_SYNC(1U) + +#define S_FATAL_LARGE_COAL 19 +#define V_FATAL_LARGE_COAL(x) ((x) << S_FATAL_LARGE_COAL) +#define F_FATAL_LARGE_COAL V_FATAL_LARGE_COAL(1U) + +#define S_PL_BAR2_FRM_ERR 18 +#define V_PL_BAR2_FRM_ERR(x) ((x) << S_PL_BAR2_FRM_ERR) +#define F_PL_BAR2_FRM_ERR V_PL_BAR2_FRM_ERR(1U) + +#define S_SILENT_DROP_TX_COAL 17 +#define 
V_SILENT_DROP_TX_COAL(x) ((x) << S_SILENT_DROP_TX_COAL) +#define F_SILENT_DROP_TX_COAL V_SILENT_DROP_TX_COAL(1U) + +#define S_ERR_INV_CTXT4 16 +#define V_ERR_INV_CTXT4(x) ((x) << S_ERR_INV_CTXT4) +#define F_ERR_INV_CTXT4 V_ERR_INV_CTXT4(1U) + +#define S_ERR_BAD_DB_PIDX4 15 +#define V_ERR_BAD_DB_PIDX4(x) ((x) << S_ERR_BAD_DB_PIDX4) +#define F_ERR_BAD_DB_PIDX4 V_ERR_BAD_DB_PIDX4(1U) + +#define S_ERR_BAD_UPFL_INC_CREDIT4 14 +#define V_ERR_BAD_UPFL_INC_CREDIT4(x) ((x) << S_ERR_BAD_UPFL_INC_CREDIT4) +#define F_ERR_BAD_UPFL_INC_CREDIT4 V_ERR_BAD_UPFL_INC_CREDIT4(1U) + +#define S_FATAL_TAG_MISMATCH 13 +#define V_FATAL_TAG_MISMATCH(x) ((x) << S_FATAL_TAG_MISMATCH) +#define F_FATAL_TAG_MISMATCH V_FATAL_TAG_MISMATCH(1U) + +#define S_FATAL_ENQ_CTL_RDY 12 +#define V_FATAL_ENQ_CTL_RDY(x) ((x) << S_FATAL_ENQ_CTL_RDY) +#define F_FATAL_ENQ_CTL_RDY V_FATAL_ENQ_CTL_RDY(1U) + +#define S_ERR_PC_RSP_LEN3 11 +#define V_ERR_PC_RSP_LEN3(x) ((x) << S_ERR_PC_RSP_LEN3) +#define F_ERR_PC_RSP_LEN3 V_ERR_PC_RSP_LEN3(1U) + +#define S_ERR_PC_RSP_LEN2 10 +#define V_ERR_PC_RSP_LEN2(x) ((x) << S_ERR_PC_RSP_LEN2) +#define F_ERR_PC_RSP_LEN2 V_ERR_PC_RSP_LEN2(1U) + +#define S_ERR_PC_RSP_LEN1 9 +#define V_ERR_PC_RSP_LEN1(x) ((x) << S_ERR_PC_RSP_LEN1) +#define F_ERR_PC_RSP_LEN1 V_ERR_PC_RSP_LEN1(1U) + +#define S_ERR_PC_RSP_LEN0 8 +#define V_ERR_PC_RSP_LEN0(x) ((x) << S_ERR_PC_RSP_LEN0) +#define F_ERR_PC_RSP_LEN0 V_ERR_PC_RSP_LEN0(1U) + +#define S_FATAL_ENQ2LL_VLD 7 +#define V_FATAL_ENQ2LL_VLD(x) ((x) << S_FATAL_ENQ2LL_VLD) +#define F_FATAL_ENQ2LL_VLD V_FATAL_ENQ2LL_VLD(1U) + +#define S_FATAL_LL_EMPTY 6 +#define V_FATAL_LL_EMPTY(x) ((x) << S_FATAL_LL_EMPTY) +#define F_FATAL_LL_EMPTY V_FATAL_LL_EMPTY(1U) + +#define S_FATAL_OFF_WDENQ 5 +#define V_FATAL_OFF_WDENQ(x) ((x) << S_FATAL_OFF_WDENQ) +#define F_FATAL_OFF_WDENQ V_FATAL_OFF_WDENQ(1U) + +#define S_FATAL_DEQ_DRDY 3 +#define M_FATAL_DEQ_DRDY 0x3U +#define V_FATAL_DEQ_DRDY(x) ((x) << S_FATAL_DEQ_DRDY) +#define G_FATAL_DEQ_DRDY(x) (((x) >> 
S_FATAL_DEQ_DRDY) & M_FATAL_DEQ_DRDY) + +#define S_FATAL_OUTP_DRDY 1 +#define M_FATAL_OUTP_DRDY 0x3U +#define V_FATAL_OUTP_DRDY(x) ((x) << S_FATAL_OUTP_DRDY) +#define G_FATAL_OUTP_DRDY(x) (((x) >> S_FATAL_OUTP_DRDY) & M_FATAL_OUTP_DRDY) + +#define S_FATAL_DEQ 0 +#define V_FATAL_DEQ(x) ((x) << S_FATAL_DEQ) +#define F_FATAL_DEQ V_FATAL_DEQ(1U) + #define A_SGE_DOORBELL_THROTTLE_THRESHOLD 0x112c #define S_THROTTLE_THRESHOLD_FL 16 @@ -2009,6 +2510,7 @@ #define V_THROTTLE_THRESHOLD_LP(x) ((x) << S_THROTTLE_THRESHOLD_LP) #define G_THROTTLE_THRESHOLD_LP(x) (((x) >> S_THROTTLE_THRESHOLD_LP) & M_THROTTLE_THRESHOLD_LP) +#define A_SGE_INT_ENABLE6 0x112c #define A_SGE_DBP_FETCH_THRESHOLD 0x1130 #define S_DBP_FETCH_THRESHOLD_FL 21 @@ -2066,6 +2568,11 @@ #define V_DBVFIFO_SIZE(x) ((x) << S_DBVFIFO_SIZE) #define G_DBVFIFO_SIZE(x) (((x) >> S_DBVFIFO_SIZE) & M_DBVFIFO_SIZE) +#define S_T6_DBVFIFO_SIZE 0 +#define M_T6_DBVFIFO_SIZE 0x1fffU +#define V_T6_DBVFIFO_SIZE(x) ((x) << S_T6_DBVFIFO_SIZE) +#define G_T6_DBVFIFO_SIZE(x) (((x) >> S_T6_DBVFIFO_SIZE) & M_T6_DBVFIFO_SIZE) + #define A_SGE_DBFIFO_STATUS3 0x1140 #define S_LP_PTRS_EQUAL 21 @@ -2093,6 +2600,18 @@ #define A_SGE_CHANGESET 0x1144 #define A_SGE_PC_RSP_ERROR 0x1148 +#define A_SGE_TBUF_CONTROL 0x114c + +#define S_DBPTBUFRSV1 9 +#define M_DBPTBUFRSV1 0x1ffU +#define V_DBPTBUFRSV1(x) ((x) << S_DBPTBUFRSV1) +#define G_DBPTBUFRSV1(x) (((x) >> S_DBPTBUFRSV1) & M_DBPTBUFRSV1) + +#define S_DBPTBUFRSV0 0 +#define M_DBPTBUFRSV0 0x1ffU +#define V_DBPTBUFRSV0(x) ((x) << S_DBPTBUFRSV0) +#define G_DBPTBUFRSV0(x) (((x) >> S_DBPTBUFRSV0) & M_DBPTBUFRSV0) + #define A_SGE_PC0_REQ_BIST_CMD 0x1180 #define A_SGE_PC0_REQ_BIST_ERROR_CNT 0x1184 #define A_SGE_PC1_REQ_BIST_CMD 0x1190 @@ -2138,6 +2657,115 @@ #define A_SGE_CTXT_MASK5 0x1234 #define A_SGE_CTXT_MASK6 0x1238 #define A_SGE_CTXT_MASK7 0x123c +#define A_SGE_QBASE_MAP0 0x1240 + +#define S_EGRESS0_SIZE 24 +#define M_EGRESS0_SIZE 0x1fU +#define V_EGRESS0_SIZE(x) ((x) << S_EGRESS0_SIZE) +#define 
G_EGRESS0_SIZE(x) (((x) >> S_EGRESS0_SIZE) & M_EGRESS0_SIZE) + +#define S_EGRESS1_SIZE 16 +#define M_EGRESS1_SIZE 0x1fU +#define V_EGRESS1_SIZE(x) ((x) << S_EGRESS1_SIZE) +#define G_EGRESS1_SIZE(x) (((x) >> S_EGRESS1_SIZE) & M_EGRESS1_SIZE) + +#define S_INGRESS0_SIZE 8 +#define M_INGRESS0_SIZE 0x1fU +#define V_INGRESS0_SIZE(x) ((x) << S_INGRESS0_SIZE) +#define G_INGRESS0_SIZE(x) (((x) >> S_INGRESS0_SIZE) & M_INGRESS0_SIZE) + +#define A_SGE_QBASE_MAP1 0x1244 + +#define S_EGRESS0_BASE 0 +#define M_EGRESS0_BASE 0x1ffffU +#define V_EGRESS0_BASE(x) ((x) << S_EGRESS0_BASE) +#define G_EGRESS0_BASE(x) (((x) >> S_EGRESS0_BASE) & M_EGRESS0_BASE) + +#define A_SGE_QBASE_MAP2 0x1248 + +#define S_EGRESS1_BASE 0 +#define M_EGRESS1_BASE 0x1ffffU +#define V_EGRESS1_BASE(x) ((x) << S_EGRESS1_BASE) +#define G_EGRESS1_BASE(x) (((x) >> S_EGRESS1_BASE) & M_EGRESS1_BASE) + +#define A_SGE_QBASE_MAP3 0x124c + +#define S_INGRESS1_BASE_256VF 16 +#define M_INGRESS1_BASE_256VF 0xffffU +#define V_INGRESS1_BASE_256VF(x) ((x) << S_INGRESS1_BASE_256VF) +#define G_INGRESS1_BASE_256VF(x) (((x) >> S_INGRESS1_BASE_256VF) & M_INGRESS1_BASE_256VF) + +#define S_INGRESS0_BASE 0 +#define M_INGRESS0_BASE 0xffffU +#define V_INGRESS0_BASE(x) ((x) << S_INGRESS0_BASE) +#define G_INGRESS0_BASE(x) (((x) >> S_INGRESS0_BASE) & M_INGRESS0_BASE) + +#define A_SGE_QBASE_INDEX 0x1250 + +#define S_QIDX 0 +#define M_QIDX 0x1ffU +#define V_QIDX(x) ((x) << S_QIDX) +#define G_QIDX(x) (((x) >> S_QIDX) & M_QIDX) + +#define A_SGE_CONM_CTRL2 0x1254 + +#define S_FLMTHRESHPACK 8 +#define M_FLMTHRESHPACK 0x7fU +#define V_FLMTHRESHPACK(x) ((x) << S_FLMTHRESHPACK) +#define G_FLMTHRESHPACK(x) (((x) >> S_FLMTHRESHPACK) & M_FLMTHRESHPACK) + +#define S_FLMTHRESH 0 +#define M_FLMTHRESH 0x7fU +#define V_FLMTHRESH(x) ((x) << S_FLMTHRESH) +#define G_FLMTHRESH(x) (((x) >> S_FLMTHRESH) & M_FLMTHRESH) + +#define A_SGE_DEBUG_CONM 0x1258 + +#define S_MPS_CH_CNG 16 +#define M_MPS_CH_CNG 0xffffU +#define V_MPS_CH_CNG(x) ((x) << S_MPS_CH_CNG) 
+#define G_MPS_CH_CNG(x) (((x) >> S_MPS_CH_CNG) & M_MPS_CH_CNG) + +#define S_TP_CH_CNG 14 +#define M_TP_CH_CNG 0x3U +#define V_TP_CH_CNG(x) ((x) << S_TP_CH_CNG) +#define G_TP_CH_CNG(x) (((x) >> S_TP_CH_CNG) & M_TP_CH_CNG) + +#define S_ST_CONG 12 +#define M_ST_CONG 0x3U +#define V_ST_CONG(x) ((x) << S_ST_CONG) +#define G_ST_CONG(x) (((x) >> S_ST_CONG) & M_ST_CONG) + +#define S_LAST_XOFF 10 +#define V_LAST_XOFF(x) ((x) << S_LAST_XOFF) +#define F_LAST_XOFF V_LAST_XOFF(1U) + +#define S_LAST_QID 0 +#define M_LAST_QID 0x3ffU +#define V_LAST_QID(x) ((x) << S_LAST_QID) +#define G_LAST_QID(x) (((x) >> S_LAST_QID) & M_LAST_QID) + +#define A_SGE_DBG_QUEUE_STAT0_CTRL 0x125c + +#define S_IMSG_GTS_SEL 18 +#define V_IMSG_GTS_SEL(x) ((x) << S_IMSG_GTS_SEL) +#define F_IMSG_GTS_SEL V_IMSG_GTS_SEL(1U) + +#define S_MGT_SEL 17 +#define V_MGT_SEL(x) ((x) << S_MGT_SEL) +#define F_MGT_SEL V_MGT_SEL(1U) + +#define S_DB_GTS_QID 0 +#define M_DB_GTS_QID 0x1ffffU +#define V_DB_GTS_QID(x) ((x) << S_DB_GTS_QID) +#define G_DB_GTS_QID(x) (((x) >> S_DB_GTS_QID) & M_DB_GTS_QID) + +#define A_SGE_DBG_QUEUE_STAT1_CTRL 0x1260 +#define A_SGE_DBG_QUEUE_STAT0 0x1264 +#define A_SGE_DBG_QUEUE_STAT1 0x1268 +#define A_SGE_DBG_BAR2_PKT_CNT 0x126c +#define A_SGE_DBG_DB_PKT_CNT 0x1270 +#define A_SGE_DBG_GTS_PKT_CNT 0x1274 #define A_SGE_DEBUG_DATA_HIGH_INDEX_0 0x1280 #define S_CIM_WM 24 @@ -2175,6 +2803,16 @@ #define V_DEBUG_CIM_EOP0_CNT(x) ((x) << S_DEBUG_CIM_EOP0_CNT) #define G_DEBUG_CIM_EOP0_CNT(x) (((x) >> S_DEBUG_CIM_EOP0_CNT) & M_DEBUG_CIM_EOP0_CNT) +#define S_DEBUG_BAR2_SOP_CNT 28 +#define M_DEBUG_BAR2_SOP_CNT 0xfU +#define V_DEBUG_BAR2_SOP_CNT(x) ((x) << S_DEBUG_BAR2_SOP_CNT) +#define G_DEBUG_BAR2_SOP_CNT(x) (((x) >> S_DEBUG_BAR2_SOP_CNT) & M_DEBUG_BAR2_SOP_CNT) + +#define S_DEBUG_BAR2_EOP_CNT 24 +#define M_DEBUG_BAR2_EOP_CNT 0xfU +#define V_DEBUG_BAR2_EOP_CNT(x) ((x) << S_DEBUG_BAR2_EOP_CNT) +#define G_DEBUG_BAR2_EOP_CNT(x) (((x) >> S_DEBUG_BAR2_EOP_CNT) & M_DEBUG_BAR2_EOP_CNT) + #define 
A_SGE_DEBUG_DATA_HIGH_INDEX_1 0x1284 #define S_DEBUG_T_RX_SOP1_CNT 28 @@ -2259,6 +2897,16 @@ #define V_DEBUG_UD_RX_EOP0_CNT(x) ((x) << S_DEBUG_UD_RX_EOP0_CNT) #define G_DEBUG_UD_RX_EOP0_CNT(x) (((x) >> S_DEBUG_UD_RX_EOP0_CNT) & M_DEBUG_UD_RX_EOP0_CNT) +#define S_DBG_TBUF_USED1 9 +#define M_DBG_TBUF_USED1 0x1ffU +#define V_DBG_TBUF_USED1(x) ((x) << S_DBG_TBUF_USED1) +#define G_DBG_TBUF_USED1(x) (((x) >> S_DBG_TBUF_USED1) & M_DBG_TBUF_USED1) + +#define S_DBG_TBUF_USED0 0 +#define M_DBG_TBUF_USED0 0x1ffU +#define V_DBG_TBUF_USED0(x) ((x) << S_DBG_TBUF_USED0) +#define G_DBG_TBUF_USED0(x) (((x) >> S_DBG_TBUF_USED0) & M_DBG_TBUF_USED0) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_3 0x128c #define S_DEBUG_U_TX_SOP3_CNT 28 @@ -2301,6 +2949,28 @@ #define V_DEBUG_U_TX_EOP0_CNT(x) ((x) << S_DEBUG_U_TX_EOP0_CNT) #define G_DEBUG_U_TX_EOP0_CNT(x) (((x) >> S_DEBUG_U_TX_EOP0_CNT) & M_DEBUG_U_TX_EOP0_CNT) +#define A_SGE_DEBUG1_DBP_THREAD 0x128c + +#define S_WR_DEQ_CNT 12 +#define M_WR_DEQ_CNT 0xfU +#define V_WR_DEQ_CNT(x) ((x) << S_WR_DEQ_CNT) +#define G_WR_DEQ_CNT(x) (((x) >> S_WR_DEQ_CNT) & M_WR_DEQ_CNT) + +#define S_WR_ENQ_CNT 8 +#define M_WR_ENQ_CNT 0xfU +#define V_WR_ENQ_CNT(x) ((x) << S_WR_ENQ_CNT) +#define G_WR_ENQ_CNT(x) (((x) >> S_WR_ENQ_CNT) & M_WR_ENQ_CNT) + +#define S_FL_DEQ_CNT 4 +#define M_FL_DEQ_CNT 0xfU +#define V_FL_DEQ_CNT(x) ((x) << S_FL_DEQ_CNT) +#define G_FL_DEQ_CNT(x) (((x) >> S_FL_DEQ_CNT) & M_FL_DEQ_CNT) + +#define S_FL_ENQ_CNT 0 +#define M_FL_ENQ_CNT 0xfU +#define V_FL_ENQ_CNT(x) ((x) << S_FL_ENQ_CNT) +#define G_FL_ENQ_CNT(x) (((x) >> S_FL_ENQ_CNT) & M_FL_ENQ_CNT) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_4 0x1290 #define S_DEBUG_PC_RSP_SOP1_CNT 28 @@ -2469,6 +3139,26 @@ #define V_DEBUG_PD_WRREQ_EOP0_CNT(x) ((x) << S_DEBUG_PD_WRREQ_EOP0_CNT) #define G_DEBUG_PD_WRREQ_EOP0_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_EOP0_CNT) & M_DEBUG_PD_WRREQ_EOP0_CNT) +#define S_DEBUG_PC_RSP_SOP_CNT 28 +#define M_DEBUG_PC_RSP_SOP_CNT 0xfU +#define V_DEBUG_PC_RSP_SOP_CNT(x) ((x) << 
S_DEBUG_PC_RSP_SOP_CNT) +#define G_DEBUG_PC_RSP_SOP_CNT(x) (((x) >> S_DEBUG_PC_RSP_SOP_CNT) & M_DEBUG_PC_RSP_SOP_CNT) + +#define S_DEBUG_PC_RSP_EOP_CNT 24 +#define M_DEBUG_PC_RSP_EOP_CNT 0xfU +#define V_DEBUG_PC_RSP_EOP_CNT(x) ((x) << S_DEBUG_PC_RSP_EOP_CNT) +#define G_DEBUG_PC_RSP_EOP_CNT(x) (((x) >> S_DEBUG_PC_RSP_EOP_CNT) & M_DEBUG_PC_RSP_EOP_CNT) + +#define S_DEBUG_PC_REQ_SOP_CNT 20 +#define M_DEBUG_PC_REQ_SOP_CNT 0xfU +#define V_DEBUG_PC_REQ_SOP_CNT(x) ((x) << S_DEBUG_PC_REQ_SOP_CNT) +#define G_DEBUG_PC_REQ_SOP_CNT(x) (((x) >> S_DEBUG_PC_REQ_SOP_CNT) & M_DEBUG_PC_REQ_SOP_CNT) + +#define S_DEBUG_PC_REQ_EOP_CNT 16 +#define M_DEBUG_PC_REQ_EOP_CNT 0xfU +#define V_DEBUG_PC_REQ_EOP_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP_CNT) +#define G_DEBUG_PC_REQ_EOP_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP_CNT) & M_DEBUG_PC_REQ_EOP_CNT) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_8 0x12a0 #define S_GLOBALENABLE_OFF 29 @@ -2541,6 +3231,14 @@ #define V_DEBUG_PD_WRREQ_INT0_CNT(x) ((x) << S_DEBUG_PD_WRREQ_INT0_CNT) #define G_DEBUG_PD_WRREQ_INT0_CNT(x) (((x) >> S_DEBUG_PD_WRREQ_INT0_CNT) & M_DEBUG_PD_WRREQ_INT0_CNT) +#define S_DEBUG_PL_BAR2_REQVLD 31 +#define V_DEBUG_PL_BAR2_REQVLD(x) ((x) << S_DEBUG_PL_BAR2_REQVLD) +#define F_DEBUG_PL_BAR2_REQVLD V_DEBUG_PL_BAR2_REQVLD(1U) + +#define S_DEBUG_PL_BAR2_REQFULL 30 +#define V_DEBUG_PL_BAR2_REQFULL(x) ((x) << S_DEBUG_PL_BAR2_REQFULL) +#define F_DEBUG_PL_BAR2_REQFULL V_DEBUG_PL_BAR2_REQFULL(1U) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_9 0x12a4 #define S_DEBUG_CPLSW_TP_RX_SOP1_CNT 28 @@ -2635,6 +3333,94 @@ #define V_DEBUG_CIM_AFULL_D(x) ((x) << S_DEBUG_CIM_AFULL_D) #define G_DEBUG_CIM_AFULL_D(x) (((x) >> S_DEBUG_CIM_AFULL_D) & M_DEBUG_CIM_AFULL_D) +#define S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING 28 +#define M_DEBUG_IDMA1_S_CPL_FLIT_REMAINING 0xfU +#define V_DEBUG_IDMA1_S_CPL_FLIT_REMAINING(x) ((x) << S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING) +#define G_DEBUG_IDMA1_S_CPL_FLIT_REMAINING(x) (((x) >> S_DEBUG_IDMA1_S_CPL_FLIT_REMAINING) & M_DEBUG_IDMA1_S_CPL_FLIT_REMAINING) + 
+#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY 27 +#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY) +#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_SRDY(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS 26 +#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS) +#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_RSS(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL 25 +#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL) +#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL V_DEBUG_IDMA1_IDMA2IMSG_CMP_OUT_NOCPL(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_FULL 24 +#define V_DEBUG_IDMA1_IDMA2IMSG_FULL(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_FULL) +#define F_DEBUG_IDMA1_IDMA2IMSG_FULL V_DEBUG_IDMA1_IDMA2IMSG_FULL(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_EOP 23 +#define V_DEBUG_IDMA1_IDMA2IMSG_EOP(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_EOP) +#define F_DEBUG_IDMA1_IDMA2IMSG_EOP V_DEBUG_IDMA1_IDMA2IMSG_EOP(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY 22 +#define V_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY) +#define F_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY V_DEBUG_IDMA1_IDMA2IMSG_FIFO_IN_DRDY(1U) + +#define S_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY 21 +#define V_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY(x) ((x) << S_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY) +#define F_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY V_DEBUG_IDMA1_IDMA2IMSG_CMP_IN_DRDY(1U) + +#define S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING 17 +#define M_DEBUG_IDMA0_S_CPL_FLIT_REMAINING 0xfU +#define V_DEBUG_IDMA0_S_CPL_FLIT_REMAINING(x) ((x) << S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING) +#define G_DEBUG_IDMA0_S_CPL_FLIT_REMAINING(x) (((x) >> S_DEBUG_IDMA0_S_CPL_FLIT_REMAINING) & M_DEBUG_IDMA0_S_CPL_FLIT_REMAINING) + +#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY 16 +#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY(x) ((x) << 
S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY) +#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_SRDY(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS 15 +#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS) +#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_RSS(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL 14 +#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL) +#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL V_DEBUG_IDMA0_IDMA2IMSG_CMP_OUT_NOCPL(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_FULL 13 +#define V_DEBUG_IDMA0_IDMA2IMSG_FULL(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_FULL) +#define F_DEBUG_IDMA0_IDMA2IMSG_FULL V_DEBUG_IDMA0_IDMA2IMSG_FULL(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_EOP 12 +#define V_DEBUG_IDMA0_IDMA2IMSG_EOP(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_EOP) +#define F_DEBUG_IDMA0_IDMA2IMSG_EOP V_DEBUG_IDMA0_IDMA2IMSG_EOP(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY 11 +#define V_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY) +#define F_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY V_DEBUG_IDMA0_IDMA2IMSG_CMP_IN_DRDY(1U) + +#define S_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY 10 +#define V_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY(x) ((x) << S_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY) +#define F_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY V_DEBUG_IDMA0_IDMA2IMSG_FIFO_IN_DRDY(1U) + +#define S_T6_DEBUG_T_RXAFULL_D 8 +#define M_T6_DEBUG_T_RXAFULL_D 0x3U +#define V_T6_DEBUG_T_RXAFULL_D(x) ((x) << S_T6_DEBUG_T_RXAFULL_D) +#define G_T6_DEBUG_T_RXAFULL_D(x) (((x) >> S_T6_DEBUG_T_RXAFULL_D) & M_T6_DEBUG_T_RXAFULL_D) + +#define S_T6_DEBUG_PD_WRREQAFULL_D 6 +#define M_T6_DEBUG_PD_WRREQAFULL_D 0x3U +#define V_T6_DEBUG_PD_WRREQAFULL_D(x) ((x) << S_T6_DEBUG_PD_WRREQAFULL_D) +#define G_T6_DEBUG_PD_WRREQAFULL_D(x) (((x) >> S_T6_DEBUG_PD_WRREQAFULL_D) & M_T6_DEBUG_PD_WRREQAFULL_D) + +#define S_T6_DEBUG_PC_RSPAFULL_D 5 +#define 
V_T6_DEBUG_PC_RSPAFULL_D(x) ((x) << S_T6_DEBUG_PC_RSPAFULL_D) +#define F_T6_DEBUG_PC_RSPAFULL_D V_T6_DEBUG_PC_RSPAFULL_D(1U) + +#define S_T6_DEBUG_PC_REQAFULL_D 4 +#define V_T6_DEBUG_PC_REQAFULL_D(x) ((x) << S_T6_DEBUG_PC_REQAFULL_D) +#define F_T6_DEBUG_PC_REQAFULL_D V_T6_DEBUG_PC_REQAFULL_D(1U) + +#define S_T6_DEBUG_CIM_AFULL_D 0 +#define V_T6_DEBUG_CIM_AFULL_D(x) ((x) << S_T6_DEBUG_CIM_AFULL_D) +#define F_T6_DEBUG_CIM_AFULL_D V_T6_DEBUG_CIM_AFULL_D(1U) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_11 0x12ac #define S_DEBUG_FLM_IDMA1_CACHE_DATA_ACTIVE 24 @@ -2731,6 +3517,16 @@ #define V_DEBUG_PC_REQ_EOP2_CNT(x) ((x) << S_DEBUG_PC_REQ_EOP2_CNT) #define G_DEBUG_PC_REQ_EOP2_CNT(x) (((x) >> S_DEBUG_PC_REQ_EOP2_CNT) & M_DEBUG_PC_REQ_EOP2_CNT) +#define S_DEBUG_IDMA1_ISHIFT_TX_SIZE 8 +#define M_DEBUG_IDMA1_ISHIFT_TX_SIZE 0x7fU +#define V_DEBUG_IDMA1_ISHIFT_TX_SIZE(x) ((x) << S_DEBUG_IDMA1_ISHIFT_TX_SIZE) +#define G_DEBUG_IDMA1_ISHIFT_TX_SIZE(x) (((x) >> S_DEBUG_IDMA1_ISHIFT_TX_SIZE) & M_DEBUG_IDMA1_ISHIFT_TX_SIZE) + +#define S_DEBUG_IDMA0_ISHIFT_TX_SIZE 0 +#define M_DEBUG_IDMA0_ISHIFT_TX_SIZE 0x7fU +#define V_DEBUG_IDMA0_ISHIFT_TX_SIZE(x) ((x) << S_DEBUG_IDMA0_ISHIFT_TX_SIZE) +#define G_DEBUG_IDMA0_ISHIFT_TX_SIZE(x) (((x) >> S_DEBUG_IDMA0_ISHIFT_TX_SIZE) & M_DEBUG_IDMA0_ISHIFT_TX_SIZE) + #define A_SGE_DEBUG_DATA_HIGH_INDEX_13 0x12b4 #define A_SGE_DEBUG_DATA_HIGH_INDEX_14 0x12b8 #define A_SGE_DEBUG_DATA_HIGH_INDEX_15 0x12bc @@ -2776,6 +3572,14 @@ #define V_DEBUG_ST_IDMA0_IDMA_SM(x) ((x) << S_DEBUG_ST_IDMA0_IDMA_SM) #define G_DEBUG_ST_IDMA0_IDMA_SM(x) (((x) >> S_DEBUG_ST_IDMA0_IDMA_SM) & M_DEBUG_ST_IDMA0_IDMA_SM) +#define S_DEBUG_ST_IDMA1_IDMA2IMSG 15 +#define V_DEBUG_ST_IDMA1_IDMA2IMSG(x) ((x) << S_DEBUG_ST_IDMA1_IDMA2IMSG) +#define F_DEBUG_ST_IDMA1_IDMA2IMSG V_DEBUG_ST_IDMA1_IDMA2IMSG(1U) + +#define S_DEBUG_ST_IDMA0_IDMA2IMSG 6 +#define V_DEBUG_ST_IDMA0_IDMA2IMSG(x) ((x) << S_DEBUG_ST_IDMA0_IDMA2IMSG) +#define F_DEBUG_ST_IDMA0_IDMA2IMSG V_DEBUG_ST_IDMA0_IDMA2IMSG(1U) + #define 
A_SGE_DEBUG_DATA_LOW_INDEX_1 0x12c4 #define S_DEBUG_ITP_EMPTY 12 @@ -2837,6 +3641,11 @@ #define V_DEBUG_ST_DBP_THREAD0_MAIN(x) ((x) << S_DEBUG_ST_DBP_THREAD0_MAIN) #define G_DEBUG_ST_DBP_THREAD0_MAIN(x) (((x) >> S_DEBUG_ST_DBP_THREAD0_MAIN) & M_DEBUG_ST_DBP_THREAD0_MAIN) +#define S_T6_DEBUG_ST_DBP_UPCP_MAIN 14 +#define M_T6_DEBUG_ST_DBP_UPCP_MAIN 0x7U +#define V_T6_DEBUG_ST_DBP_UPCP_MAIN(x) ((x) << S_T6_DEBUG_ST_DBP_UPCP_MAIN) +#define G_T6_DEBUG_ST_DBP_UPCP_MAIN(x) (((x) >> S_T6_DEBUG_ST_DBP_UPCP_MAIN) & M_T6_DEBUG_ST_DBP_UPCP_MAIN) + #define A_SGE_DEBUG_DATA_LOW_INDEX_3 0x12cc #define S_DEBUG_ST_DBP_UPCP_MAIN 14 @@ -2956,6 +3765,28 @@ #define V_DEBUG_FLM_DBPTR_QID(x) ((x) << S_DEBUG_FLM_DBPTR_QID) #define G_DEBUG_FLM_DBPTR_QID(x) (((x) >> S_DEBUG_FLM_DBPTR_QID) & M_DEBUG_FLM_DBPTR_QID) +#define A_SGE_DEBUG0_DBP_THREAD 0x12d4 + +#define S_THREAD_ST_MAIN 25 +#define M_THREAD_ST_MAIN 0x3fU +#define V_THREAD_ST_MAIN(x) ((x) << S_THREAD_ST_MAIN) +#define G_THREAD_ST_MAIN(x) (((x) >> S_THREAD_ST_MAIN) & M_THREAD_ST_MAIN) + +#define S_THREAD_ST_CIMFL 21 +#define M_THREAD_ST_CIMFL 0xfU +#define V_THREAD_ST_CIMFL(x) ((x) << S_THREAD_ST_CIMFL) +#define G_THREAD_ST_CIMFL(x) (((x) >> S_THREAD_ST_CIMFL) & M_THREAD_ST_CIMFL) + +#define S_THREAD_CMDOP 17 +#define M_THREAD_CMDOP 0xfU +#define V_THREAD_CMDOP(x) ((x) << S_THREAD_CMDOP) +#define G_THREAD_CMDOP(x) (((x) >> S_THREAD_CMDOP) & M_THREAD_CMDOP) + +#define S_THREAD_QID 0 +#define M_THREAD_QID 0x1ffffU +#define V_THREAD_QID(x) ((x) << S_THREAD_QID) +#define G_THREAD_QID(x) (((x) >> S_THREAD_QID) & M_THREAD_QID) + #define A_SGE_DEBUG_DATA_LOW_INDEX_6 0x12d8 #define S_DEBUG_DBP_THREAD0_QID 0 @@ -3060,6 +3891,37 @@ #define V_INGRESS1_SIZE(x) ((x) << S_INGRESS1_SIZE) #define G_INGRESS1_SIZE(x) (((x) >> S_INGRESS1_SIZE) & M_INGRESS1_SIZE) +#define A_SGE_WC_EGRS_BAR2_OFF_PF 0x1300 + +#define S_PFIQSPERPAGE 28 +#define M_PFIQSPERPAGE 0xfU +#define V_PFIQSPERPAGE(x) ((x) << S_PFIQSPERPAGE) +#define G_PFIQSPERPAGE(x) (((x) >> 
S_PFIQSPERPAGE) & M_PFIQSPERPAGE) + +#define S_PFEQSPERPAGE 24 +#define M_PFEQSPERPAGE 0xfU +#define V_PFEQSPERPAGE(x) ((x) << S_PFEQSPERPAGE) +#define G_PFEQSPERPAGE(x) (((x) >> S_PFEQSPERPAGE) & M_PFEQSPERPAGE) + +#define S_PFWCQSPERPAGE 20 +#define M_PFWCQSPERPAGE 0xfU +#define V_PFWCQSPERPAGE(x) ((x) << S_PFWCQSPERPAGE) +#define G_PFWCQSPERPAGE(x) (((x) >> S_PFWCQSPERPAGE) & M_PFWCQSPERPAGE) + +#define S_PFWCOFFEN 19 +#define V_PFWCOFFEN(x) ((x) << S_PFWCOFFEN) +#define F_PFWCOFFEN V_PFWCOFFEN(1U) + +#define S_PFMAXWCSIZE 17 +#define M_PFMAXWCSIZE 0x3U +#define V_PFMAXWCSIZE(x) ((x) << S_PFMAXWCSIZE) +#define G_PFMAXWCSIZE(x) (((x) >> S_PFMAXWCSIZE) & M_PFMAXWCSIZE) + +#define S_PFWCOFFSET 0 +#define M_PFWCOFFSET 0x1ffffU +#define V_PFWCOFFSET(x) ((x) << S_PFWCOFFSET) +#define G_PFWCOFFSET(x) (((x) >> S_PFWCOFFSET) & M_PFWCOFFSET) + #define A_SGE_QUEUE_BASE_MAP_LOW 0x1304 #define S_INGRESS2_BASE 16 @@ -3072,6 +3934,37 @@ #define V_INGRESS1_BASE(x) ((x) << S_INGRESS1_BASE) #define G_INGRESS1_BASE(x) (((x) >> S_INGRESS1_BASE) & M_INGRESS1_BASE) +#define A_SGE_WC_EGRS_BAR2_OFF_VF 0x1320 + +#define S_VFIQSPERPAGE 28 +#define M_VFIQSPERPAGE 0xfU +#define V_VFIQSPERPAGE(x) ((x) << S_VFIQSPERPAGE) +#define G_VFIQSPERPAGE(x) (((x) >> S_VFIQSPERPAGE) & M_VFIQSPERPAGE) + +#define S_VFEQSPERPAGE 24 +#define M_VFEQSPERPAGE 0xfU +#define V_VFEQSPERPAGE(x) ((x) << S_VFEQSPERPAGE) +#define G_VFEQSPERPAGE(x) (((x) >> S_VFEQSPERPAGE) & M_VFEQSPERPAGE) + +#define S_VFWCQSPERPAGE 20 +#define M_VFWCQSPERPAGE 0xfU +#define V_VFWCQSPERPAGE(x) ((x) << S_VFWCQSPERPAGE) +#define G_VFWCQSPERPAGE(x) (((x) >> S_VFWCQSPERPAGE) & M_VFWCQSPERPAGE) + +#define S_VFWCOFFEN 19 +#define V_VFWCOFFEN(x) ((x) << S_VFWCOFFEN) +#define F_VFWCOFFEN V_VFWCOFFEN(1U) + +#define S_VFMAXWCSIZE 17 +#define M_VFMAXWCSIZE 0x3U +#define V_VFMAXWCSIZE(x) ((x) << S_VFMAXWCSIZE) +#define G_VFMAXWCSIZE(x) (((x) >> S_VFMAXWCSIZE) & M_VFMAXWCSIZE) + +#define S_VFWCOFFSET 0 +#define M_VFWCOFFSET 0x1ffffU +#define 
V_VFWCOFFSET(x) ((x) << S_VFWCOFFSET) +#define G_VFWCOFFSET(x) (((x) >> S_VFWCOFFSET) & M_VFWCOFFSET) + #define A_SGE_LA_RDPTR_0 0x1800 #define A_SGE_LA_RDDATA_0 0x1804 #define A_SGE_LA_WRPTR_0 0x1808 @@ -3409,6 +4302,11 @@ #define V_IDE(x) ((x) << S_IDE) #define F_IDE V_IDE(1U) +#define S_MEMSEL_PCIE 1 +#define M_MEMSEL_PCIE 0x1fU +#define V_MEMSEL_PCIE(x) ((x) << S_MEMSEL_PCIE) +#define G_MEMSEL_PCIE(x) (((x) >> S_MEMSEL_PCIE) & M_MEMSEL_PCIE) + #define A_PCIE_NONFAT_ERR 0x3010 #define S_RDRSPERR 9 @@ -3535,6 +4433,14 @@ #define V_BAR2REQ(x) ((x) << S_BAR2REQ) #define F_BAR2REQ V_BAR2REQ(1U) +#define S_MARSPUE 30 +#define V_MARSPUE(x) ((x) << S_MARSPUE) +#define F_MARSPUE V_MARSPUE(1U) + +#define S_KDBEOPERR 7 +#define V_KDBEOPERR(x) ((x) << S_KDBEOPERR) +#define F_KDBEOPERR V_KDBEOPERR(1U) + #define A_PCIE_CFG 0x3014 #define S_CFGDMAXPYLDSZRX 26 @@ -3625,6 +4531,10 @@ #define V_LINKDNRSTEN(x) ((x) << S_LINKDNRSTEN) #define F_LINKDNRSTEN V_LINKDNRSTEN(1U) +#define S_T5_PIOSTOPEN 31 +#define V_T5_PIOSTOPEN(x) ((x) << S_T5_PIOSTOPEN) +#define F_T5_PIOSTOPEN V_T5_PIOSTOPEN(1U) + #define S_DIAGCTRLBUS 28 #define M_DIAGCTRLBUS 0x7U #define V_DIAGCTRLBUS(x) ((x) << S_DIAGCTRLBUS) @@ -3672,6 +4582,10 @@ #define V_LINKREQRSTPCIECRSTMODE(x) ((x) << S_LINKREQRSTPCIECRSTMODE) #define F_LINKREQRSTPCIECRSTMODE V_LINKREQRSTPCIECRSTMODE(1U) +#define S_T6_PIOSTOPEN 31 +#define V_T6_PIOSTOPEN(x) ((x) << S_T6_PIOSTOPEN) +#define F_T6_PIOSTOPEN V_T6_PIOSTOPEN(1U) + #define A_PCIE_DMA_CTRL 0x3018 #define S_LITTLEENDIAN 7 @@ -3699,6 +4613,11 @@ #define V_TOTMAXTAG(x) ((x) << S_TOTMAXTAG) #define G_TOTMAXTAG(x) (((x) >> S_TOTMAXTAG) & M_TOTMAXTAG) +#define S_T6_TOTMAXTAG 0 +#define M_T6_TOTMAXTAG 0x7U +#define V_T6_TOTMAXTAG(x) ((x) << S_T6_TOTMAXTAG) +#define G_T6_TOTMAXTAG(x) (((x) >> S_T6_TOTMAXTAG) & M_T6_TOTMAXTAG) + #define A_PCIE_DMA_CFG 0x301c #define S_MAXPYLDSIZE 28 @@ -3828,6 +4747,7 @@ #define V_PERSTTIMER(x) ((x) << S_PERSTTIMER) #define G_PERSTTIMER(x) (((x) >> 
S_PERSTTIMER) & M_PERSTTIMER) +#define A_PCIE_CFG7 0x302c #define A_PCIE_CMD_CTRL 0x303c #define A_PCIE_CMD_CFG 0x3040 @@ -3965,6 +4885,32 @@ #define V_CFG_SPACE_PF(x) ((x) << S_CFG_SPACE_PF) #define G_CFG_SPACE_PF(x) (((x) >> S_CFG_SPACE_PF) & M_CFG_SPACE_PF) +#define S_T6_ENABLE 31 +#define V_T6_ENABLE(x) ((x) << S_T6_ENABLE) +#define F_T6_ENABLE V_T6_ENABLE(1U) + +#define S_T6_AI 30 +#define V_T6_AI(x) ((x) << S_T6_AI) +#define F_T6_AI V_T6_AI(1U) + +#define S_T6_CS2 29 +#define V_T6_CS2(x) ((x) << S_T6_CS2) +#define F_T6_CS2 V_T6_CS2(1U) + +#define S_T6_WRBE 25 +#define M_T6_WRBE 0xfU +#define V_T6_WRBE(x) ((x) << S_T6_WRBE) +#define G_T6_WRBE(x) (((x) >> S_T6_WRBE) & M_T6_WRBE) + +#define S_T6_CFG_SPACE_VFVLD 24 +#define V_T6_CFG_SPACE_VFVLD(x) ((x) << S_T6_CFG_SPACE_VFVLD) +#define F_T6_CFG_SPACE_VFVLD V_T6_CFG_SPACE_VFVLD(1U) + +#define S_T6_CFG_SPACE_RVF 16 +#define M_T6_CFG_SPACE_RVF 0xffU +#define V_T6_CFG_SPACE_RVF(x) ((x) << S_T6_CFG_SPACE_RVF) +#define G_T6_CFG_SPACE_RVF(x) (((x) >> S_T6_CFG_SPACE_RVF) & M_T6_CFG_SPACE_RVF) + #define A_PCIE_CFG_SPACE_DATA 0x3064 #define A_PCIE_MEM_ACCESS_BASE_WIN 0x3068 @@ -4294,6 +5240,30 @@ #define V_FID_VFID_RVF(x) ((x) << S_FID_VFID_RVF) #define G_FID_VFID_RVF(x) (((x) >> S_FID_VFID_RVF) & M_FID_VFID_RVF) +#define S_T6_FID_VFID_VFID 15 +#define M_T6_FID_VFID_VFID 0x1ffU +#define V_T6_FID_VFID_VFID(x) ((x) << S_T6_FID_VFID_VFID) +#define G_T6_FID_VFID_VFID(x) (((x) >> S_T6_FID_VFID_VFID) & M_T6_FID_VFID_VFID) + +#define S_T6_FID_VFID_TC 12 +#define M_T6_FID_VFID_TC 0x7U +#define V_T6_FID_VFID_TC(x) ((x) << S_T6_FID_VFID_TC) +#define G_T6_FID_VFID_TC(x) (((x) >> S_T6_FID_VFID_TC) & M_T6_FID_VFID_TC) + +#define S_T6_FID_VFID_VFVLD 11 +#define V_T6_FID_VFID_VFVLD(x) ((x) << S_T6_FID_VFID_VFVLD) +#define F_T6_FID_VFID_VFVLD V_T6_FID_VFID_VFVLD(1U) + +#define S_T6_FID_VFID_PF 8 +#define M_T6_FID_VFID_PF 0x7U +#define V_T6_FID_VFID_PF(x) ((x) << S_T6_FID_VFID_PF) +#define G_T6_FID_VFID_PF(x) (((x) >> S_T6_FID_VFID_PF) & 
M_T6_FID_VFID_PF) + +#define S_T6_FID_VFID_RVF 0 +#define M_T6_FID_VFID_RVF 0xffU +#define V_T6_FID_VFID_RVF(x) ((x) << S_T6_FID_VFID_RVF) +#define G_T6_FID_VFID_RVF(x) (((x) >> S_T6_FID_VFID_RVF) & M_T6_FID_VFID_RVF) + #define A_PCIE_FID 0x3900 #define S_PAD 11 @@ -4356,6 +5326,26 @@ #define V_RCVDPIOREQCOOKIE(x) ((x) << S_RCVDPIOREQCOOKIE) #define G_RCVDPIOREQCOOKIE(x) (((x) >> S_RCVDPIOREQCOOKIE) & M_RCVDPIOREQCOOKIE) +#define S_RCVDVDMRXCOOKIE 24 +#define M_RCVDVDMRXCOOKIE 0xffU +#define V_RCVDVDMRXCOOKIE(x) ((x) << S_RCVDVDMRXCOOKIE) +#define G_RCVDVDMRXCOOKIE(x) (((x) >> S_RCVDVDMRXCOOKIE) & M_RCVDVDMRXCOOKIE) + +#define S_RCVDVDMTXCOOKIE 16 +#define M_RCVDVDMTXCOOKIE 0xffU +#define V_RCVDVDMTXCOOKIE(x) ((x) << S_RCVDVDMTXCOOKIE) +#define G_RCVDVDMTXCOOKIE(x) (((x) >> S_RCVDVDMTXCOOKIE) & M_RCVDVDMTXCOOKIE) + +#define S_T6_RCVDMAREQCOOKIE 8 +#define M_T6_RCVDMAREQCOOKIE 0xffU +#define V_T6_RCVDMAREQCOOKIE(x) ((x) << S_T6_RCVDMAREQCOOKIE) +#define G_T6_RCVDMAREQCOOKIE(x) (((x) >> S_T6_RCVDMAREQCOOKIE) & M_T6_RCVDMAREQCOOKIE) + +#define S_T6_RCVDPIOREQCOOKIE 0 +#define M_T6_RCVDPIOREQCOOKIE 0xffU +#define V_T6_RCVDPIOREQCOOKIE(x) ((x) << S_T6_RCVDPIOREQCOOKIE) +#define G_T6_RCVDPIOREQCOOKIE(x) (((x) >> S_T6_RCVDPIOREQCOOKIE) & M_T6_RCVDPIOREQCOOKIE) + #define A_PCIE_VC0_CDTS0 0x56cc #define S_CPLD0 20 @@ -4455,6 +5445,20 @@ #define V_STATECFGINIT(x) ((x) << S_STATECFGINIT) #define G_STATECFGINIT(x) (((x) >> S_STATECFGINIT) & M_STATECFGINIT) +#define S_LTSSMENABLE_PCIE 12 +#define V_LTSSMENABLE_PCIE(x) ((x) << S_LTSSMENABLE_PCIE) +#define F_LTSSMENABLE_PCIE V_LTSSMENABLE_PCIE(1U) + +#define S_STATECFGINITF_PCIE 4 +#define M_STATECFGINITF_PCIE 0xffU +#define V_STATECFGINITF_PCIE(x) ((x) << S_STATECFGINITF_PCIE) +#define G_STATECFGINITF_PCIE(x) (((x) >> S_STATECFGINITF_PCIE) & M_STATECFGINITF_PCIE) + +#define S_STATECFGINIT_PCIE 0 +#define M_STATECFGINIT_PCIE 0xfU +#define V_STATECFGINIT_PCIE(x) ((x) << S_STATECFGINIT_PCIE) +#define G_STATECFGINIT_PCIE(x) (((x) >> 
S_STATECFGINIT_PCIE) & M_STATECFGINIT_PCIE) + #define A_PCIE_CRS 0x56f8 #define S_CRS_ENABLE 0 @@ -4467,6 +5471,10 @@ #define V_LTSSM_ENABLE(x) ((x) << S_LTSSM_ENABLE) #define F_LTSSM_ENABLE V_LTSSM_ENABLE(1U) +#define S_LTSSM_STALL_DISABLE 1 +#define V_LTSSM_STALL_DISABLE(x) ((x) << S_LTSSM_STALL_DISABLE) +#define F_LTSSM_STALL_DISABLE V_LTSSM_STALL_DISABLE(1U) + #define A_PCIE_CORE_ACK_LATENCY_TIMER_REPLAY_TIMER 0x5700 #define S_REPLAY_TIME_LIMIT 16 @@ -4867,6 +5875,15 @@ #define V_NFTS_GEN2_3(x) ((x) << S_NFTS_GEN2_3) #define G_NFTS_GEN2_3(x) (((x) >> S_NFTS_GEN2_3) & M_NFTS_GEN2_3) +#define S_AUTO_LANE_FLIP_CTRL_EN 16 +#define V_AUTO_LANE_FLIP_CTRL_EN(x) ((x) << S_AUTO_LANE_FLIP_CTRL_EN) +#define F_AUTO_LANE_FLIP_CTRL_EN V_AUTO_LANE_FLIP_CTRL_EN(1U) + +#define S_T6_NUM_LANES 8 +#define M_T6_NUM_LANES 0x1fU +#define V_T6_NUM_LANES(x) ((x) << S_T6_NUM_LANES) +#define G_T6_NUM_LANES(x) (((x) >> S_T6_NUM_LANES) & M_T6_NUM_LANES) + #define A_PCIE_CORE_PHY_STATUS 0x5810 #define A_PCIE_CORE_PHY_CONTROL 0x5814 #define A_PCIE_CORE_GEN3_CONTROL 0x5890 @@ -4993,6 +6010,10 @@ #define V_PIPE_LOOPBACK_EN(x) ((x) << S_PIPE_LOOPBACK_EN) #define F_PIPE_LOOPBACK_EN V_PIPE_LOOPBACK_EN(1U) +#define S_T6_PIPE_LOOPBACK_EN 31 +#define V_T6_PIPE_LOOPBACK_EN(x) ((x) << S_T6_PIPE_LOOPBACK_EN) +#define F_T6_PIPE_LOOPBACK_EN V_T6_PIPE_LOOPBACK_EN(1U) + #define A_PCIE_CORE_DBI_RO_WE 0x58bc #define S_READONLY_WRITEEN 0 @@ -5244,6 +6265,25 @@ #define V_MINTAG(x) ((x) << S_MINTAG) #define G_MINTAG(x) (((x) >> S_MINTAG) & M_MINTAG) +#define S_T6_T5_DMA_MAXREQCNT 20 +#define M_T6_T5_DMA_MAXREQCNT 0x7fU +#define V_T6_T5_DMA_MAXREQCNT(x) ((x) << S_T6_T5_DMA_MAXREQCNT) +#define G_T6_T5_DMA_MAXREQCNT(x) (((x) >> S_T6_T5_DMA_MAXREQCNT) & M_T6_T5_DMA_MAXREQCNT) + +#define S_T6_T5_DMA_MAXRSPCNT 9 +#define M_T6_T5_DMA_MAXRSPCNT 0xffU +#define V_T6_T5_DMA_MAXRSPCNT(x) ((x) << S_T6_T5_DMA_MAXRSPCNT) +#define G_T6_T5_DMA_MAXRSPCNT(x) (((x) >> S_T6_T5_DMA_MAXRSPCNT) & M_T6_T5_DMA_MAXRSPCNT) + +#define 
S_T6_SEQCHKDIS 8 +#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS) +#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U) + +#define S_T6_MINTAG 0 +#define M_T6_MINTAG 0xffU +#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG) +#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG) + #define A_PCIE_T5_DMA_STAT 0x5944 #define S_DMA_RESPCNT 20 @@ -5261,6 +6301,21 @@ #define V_DMA_WRREQCNT(x) ((x) << S_DMA_WRREQCNT) #define G_DMA_WRREQCNT(x) (((x) >> S_DMA_WRREQCNT) & M_DMA_WRREQCNT) +#define S_T6_DMA_RESPCNT 20 +#define M_T6_DMA_RESPCNT 0x3ffU +#define V_T6_DMA_RESPCNT(x) ((x) << S_T6_DMA_RESPCNT) +#define G_T6_DMA_RESPCNT(x) (((x) >> S_T6_DMA_RESPCNT) & M_T6_DMA_RESPCNT) + +#define S_T6_DMA_RDREQCNT 12 +#define M_T6_DMA_RDREQCNT 0x3fU +#define V_T6_DMA_RDREQCNT(x) ((x) << S_T6_DMA_RDREQCNT) +#define G_T6_DMA_RDREQCNT(x) (((x) >> S_T6_DMA_RDREQCNT) & M_T6_DMA_RDREQCNT) + +#define S_T6_DMA_WRREQCNT 0 +#define M_T6_DMA_WRREQCNT 0x1ffU +#define V_T6_DMA_WRREQCNT(x) ((x) << S_T6_DMA_WRREQCNT) +#define G_T6_DMA_WRREQCNT(x) (((x) >> S_T6_DMA_WRREQCNT) & M_T6_DMA_WRREQCNT) + #define A_PCIE_T5_DMA_STAT2 0x5948 #define S_COOKIECNT 24 @@ -5443,6 +6498,20 @@ #define V_USECMDPOOL(x) ((x) << S_USECMDPOOL) #define F_USECMDPOOL V_USECMDPOOL(1U) +#define S_T6_T5_CMD_MAXRSPCNT 9 +#define M_T6_T5_CMD_MAXRSPCNT 0x3fU +#define V_T6_T5_CMD_MAXRSPCNT(x) ((x) << S_T6_T5_CMD_MAXRSPCNT) +#define G_T6_T5_CMD_MAXRSPCNT(x) (((x) >> S_T6_T5_CMD_MAXRSPCNT) & M_T6_T5_CMD_MAXRSPCNT) + +#define S_T6_USECMDPOOL 8 +#define V_T6_USECMDPOOL(x) ((x) << S_T6_USECMDPOOL) +#define F_T6_USECMDPOOL V_T6_USECMDPOOL(1U) + +#define S_T6_MINTAG 0 +#define M_T6_MINTAG 0xffU +#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG) +#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG) + #define A_PCIE_T5_CMD_STAT 0x5984 #define S_T5_STAT_RSPCNT 20 @@ -5455,6 +6524,16 @@ #define V_RDREQCNT(x) ((x) << S_RDREQCNT) #define G_RDREQCNT(x) (((x) >> S_RDREQCNT) & M_RDREQCNT) +#define S_T6_T5_STAT_RSPCNT 20 +#define M_T6_T5_STAT_RSPCNT 0xffU 
+#define V_T6_T5_STAT_RSPCNT(x) ((x) << S_T6_T5_STAT_RSPCNT) +#define G_T6_T5_STAT_RSPCNT(x) (((x) >> S_T6_T5_STAT_RSPCNT) & M_T6_T5_STAT_RSPCNT) + +#define S_T6_RDREQCNT 12 +#define M_T6_RDREQCNT 0xfU +#define V_T6_RDREQCNT(x) ((x) << S_T6_RDREQCNT) +#define G_T6_RDREQCNT(x) (((x) >> S_T6_RDREQCNT) & M_T6_RDREQCNT) + #define A_PCIE_CORE_INBOUND_NON_POSTED_REQUESTS_BUFFER_ALLOCATION 0x5988 #define S_IN0H 24 @@ -5779,6 +6858,25 @@ #define V_T5_HMA_MAXRSPCNT(x) ((x) << S_T5_HMA_MAXRSPCNT) #define G_T5_HMA_MAXRSPCNT(x) (((x) >> S_T5_HMA_MAXRSPCNT) & M_T5_HMA_MAXRSPCNT) +#define S_T6_HMA_MAXREQCNT 20 +#define M_T6_HMA_MAXREQCNT 0x7fU +#define V_T6_HMA_MAXREQCNT(x) ((x) << S_T6_HMA_MAXREQCNT) +#define G_T6_HMA_MAXREQCNT(x) (((x) >> S_T6_HMA_MAXREQCNT) & M_T6_HMA_MAXREQCNT) + +#define S_T6_T5_HMA_MAXRSPCNT 9 +#define M_T6_T5_HMA_MAXRSPCNT 0xffU +#define V_T6_T5_HMA_MAXRSPCNT(x) ((x) << S_T6_T5_HMA_MAXRSPCNT) +#define G_T6_T5_HMA_MAXRSPCNT(x) (((x) >> S_T6_T5_HMA_MAXRSPCNT) & M_T6_T5_HMA_MAXRSPCNT) + +#define S_T6_SEQCHKDIS 8 +#define V_T6_SEQCHKDIS(x) ((x) << S_T6_SEQCHKDIS) +#define F_T6_SEQCHKDIS V_T6_SEQCHKDIS(1U) + +#define S_T6_MINTAG 0 +#define M_T6_MINTAG 0xffU +#define V_T6_MINTAG(x) ((x) << S_T6_MINTAG) +#define G_T6_MINTAG(x) (((x) >> S_T6_MINTAG) & M_T6_MINTAG) + #define A_PCIE_CORE_ROOT_COMPLEX_ERROR_SEVERITY 0x59b4 #define S_RLCS 31 @@ -5842,6 +6940,11 @@ #define V_HMA_WRREQCNT(x) ((x) << S_HMA_WRREQCNT) #define G_HMA_WRREQCNT(x) (((x) >> S_HMA_WRREQCNT) & M_HMA_WRREQCNT) +#define S_T6_HMA_RESPCNT 20 +#define M_T6_HMA_RESPCNT 0x3ffU +#define V_T6_HMA_RESPCNT(x) ((x) << S_T6_HMA_RESPCNT) +#define G_T6_HMA_RESPCNT(x) (((x) >> S_T6_HMA_RESPCNT) & M_T6_HMA_RESPCNT) + #define A_PCIE_CORE_ROOT_COMPLEX_INTERRUPT_ENABLE 0x59b8 #define S_RLCI 31 @@ -6398,6 +7501,26 @@ #define V_PIOCPL_PLMRSPPERR(x) ((x) << S_PIOCPL_PLMRSPPERR) #define F_PIOCPL_PLMRSPPERR V_PIOCPL_PLMRSPPERR(1U) +#define S_MA_RSPCTLPERR 26 +#define V_MA_RSPCTLPERR(x) ((x) << S_MA_RSPCTLPERR) +#define 
F_MA_RSPCTLPERR V_MA_RSPCTLPERR(1U) + +#define S_T6_IPRXDATA_VC0PERR 15 +#define V_T6_IPRXDATA_VC0PERR(x) ((x) << S_T6_IPRXDATA_VC0PERR) +#define F_T6_IPRXDATA_VC0PERR V_T6_IPRXDATA_VC0PERR(1U) + +#define S_T6_IPRXHDR_VC0PERR 14 +#define V_T6_IPRXHDR_VC0PERR(x) ((x) << S_T6_IPRXHDR_VC0PERR) +#define F_T6_IPRXHDR_VC0PERR V_T6_IPRXHDR_VC0PERR(1U) + +#define S_PIOCPL_VDMTXCTLPERR 13 +#define V_PIOCPL_VDMTXCTLPERR(x) ((x) << S_PIOCPL_VDMTXCTLPERR) +#define F_PIOCPL_VDMTXCTLPERR V_PIOCPL_VDMTXCTLPERR(1U) + +#define S_PIOCPL_VDMTXDATAPERR 12 +#define V_PIOCPL_VDMTXDATAPERR(x) ((x) << S_PIOCPL_VDMTXDATAPERR) +#define F_PIOCPL_VDMTXDATAPERR V_PIOCPL_VDMTXDATAPERR(1U) + #define A_PCIE_CORE_GENERAL_PURPOSE_CONTROL_2 0x59d4 #define A_PCIE_RSP_ERR_INT_LOG_EN 0x59d4 @@ -6489,6 +7612,16 @@ #define V_REQVFID(x) ((x) << S_REQVFID) #define G_REQVFID(x) (((x) >> S_REQVFID) & M_REQVFID) +#define S_T6_ADDR10B 9 +#define M_T6_ADDR10B 0x3ffU +#define V_T6_ADDR10B(x) ((x) << S_T6_ADDR10B) +#define G_T6_ADDR10B(x) (((x) >> S_T6_ADDR10B) & M_T6_ADDR10B) + +#define S_T6_REQVFID 0 +#define M_T6_REQVFID 0x1ffU +#define V_T6_REQVFID(x) ((x) << S_T6_REQVFID) +#define G_T6_REQVFID(x) (((x) >> S_T6_REQVFID) & M_T6_REQVFID) + #define A_PCIE_CHANGESET 0x59fc #define A_PCIE_REVISION 0x5a00 #define A_PCIE_PDEBUG_INDEX 0x5a04 @@ -6503,6 +7636,16 @@ #define V_PDEBUGSELL(x) ((x) << S_PDEBUGSELL) #define G_PDEBUGSELL(x) (((x) >> S_PDEBUGSELL) & M_PDEBUGSELL) +#define S_T6_PDEBUGSELH 16 +#define M_T6_PDEBUGSELH 0x7fU +#define V_T6_PDEBUGSELH(x) ((x) << S_T6_PDEBUGSELH) +#define G_T6_PDEBUGSELH(x) (((x) >> S_T6_PDEBUGSELH) & M_T6_PDEBUGSELH) + +#define S_T6_PDEBUGSELL 0 +#define M_T6_PDEBUGSELL 0x7fU +#define V_T6_PDEBUGSELL(x) ((x) << S_T6_PDEBUGSELL) +#define G_T6_PDEBUGSELL(x) (((x) >> S_T6_PDEBUGSELL) & M_T6_PDEBUGSELL) + #define A_PCIE_PDEBUG_DATA_HIGH 0x5a08 #define A_PCIE_PDEBUG_DATA_LOW 0x5a0c #define A_PCIE_CDEBUG_INDEX 0x5a10 @@ -6693,6 +7836,34 @@ #define V_PL_TOVF(x) ((x) << S_PL_TOVF) #define 
G_PL_TOVF(x) (((x) >> S_PL_TOVF) & M_PL_TOVF) +#define S_T6_SOURCE 17 +#define M_T6_SOURCE 0x3U +#define V_T6_SOURCE(x) ((x) << S_T6_SOURCE) +#define G_T6_SOURCE(x) (((x) >> S_T6_SOURCE) & M_T6_SOURCE) + +#define S_T6_DBI_WRITE 13 +#define M_T6_DBI_WRITE 0xfU +#define V_T6_DBI_WRITE(x) ((x) << S_T6_DBI_WRITE) +#define G_T6_DBI_WRITE(x) (((x) >> S_T6_DBI_WRITE) & M_T6_DBI_WRITE) + +#define S_T6_DBI_CS2 12 +#define V_T6_DBI_CS2(x) ((x) << S_T6_DBI_CS2) +#define F_T6_DBI_CS2 V_T6_DBI_CS2(1U) + +#define S_T6_DBI_PF 9 +#define M_T6_DBI_PF 0x7U +#define V_T6_DBI_PF(x) ((x) << S_T6_DBI_PF) +#define G_T6_DBI_PF(x) (((x) >> S_T6_DBI_PF) & M_T6_DBI_PF) + +#define S_T6_PL_TOVFVLD 8 +#define V_T6_PL_TOVFVLD(x) ((x) << S_T6_PL_TOVFVLD) +#define F_T6_PL_TOVFVLD V_T6_PL_TOVFVLD(1U) + +#define S_T6_PL_TOVF 0 +#define M_T6_PL_TOVF 0xffU +#define V_T6_PL_TOVF(x) ((x) << S_T6_PL_TOVF) +#define G_T6_PL_TOVF(x) (((x) >> S_T6_PL_TOVF) & M_T6_PL_TOVF) + #define A_PCIE_MSI_EN_0 0x5aa0 #define A_PCIE_MSI_EN_1 0x5aa4 #define A_PCIE_MSI_EN_2 0x5aa8 @@ -6898,7 +8069,192 @@ #define V_LNA_RXPWRSTATE(x) ((x) << S_LNA_RXPWRSTATE) #define G_LNA_RXPWRSTATE(x) (((x) >> S_LNA_RXPWRSTATE) & M_LNA_RXPWRSTATE) +#define S_REQ_LNH_RXSTATEDONE 31 +#define V_REQ_LNH_RXSTATEDONE(x) ((x) << S_REQ_LNH_RXSTATEDONE) +#define F_REQ_LNH_RXSTATEDONE V_REQ_LNH_RXSTATEDONE(1U) + +#define S_REQ_LNH_RXSTATEREQ 30 +#define V_REQ_LNH_RXSTATEREQ(x) ((x) << S_REQ_LNH_RXSTATEREQ) +#define F_REQ_LNH_RXSTATEREQ V_REQ_LNH_RXSTATEREQ(1U) + +#define S_REQ_LNH_RXPWRSTATE 28 +#define M_REQ_LNH_RXPWRSTATE 0x3U +#define V_REQ_LNH_RXPWRSTATE(x) ((x) << S_REQ_LNH_RXPWRSTATE) +#define G_REQ_LNH_RXPWRSTATE(x) (((x) >> S_REQ_LNH_RXPWRSTATE) & M_REQ_LNH_RXPWRSTATE) + +#define S_REQ_LNG_RXSTATEDONE 27 +#define V_REQ_LNG_RXSTATEDONE(x) ((x) << S_REQ_LNG_RXSTATEDONE) +#define F_REQ_LNG_RXSTATEDONE V_REQ_LNG_RXSTATEDONE(1U) + +#define S_REQ_LNG_RXSTATEREQ 26 +#define V_REQ_LNG_RXSTATEREQ(x) ((x) << S_REQ_LNG_RXSTATEREQ) +#define 
F_REQ_LNG_RXSTATEREQ V_REQ_LNG_RXSTATEREQ(1U) + +#define S_REQ_LNG_RXPWRSTATE 24 +#define M_REQ_LNG_RXPWRSTATE 0x3U +#define V_REQ_LNG_RXPWRSTATE(x) ((x) << S_REQ_LNG_RXPWRSTATE) +#define G_REQ_LNG_RXPWRSTATE(x) (((x) >> S_REQ_LNG_RXPWRSTATE) & M_REQ_LNG_RXPWRSTATE) + +#define S_REQ_LNF_RXSTATEDONE 23 +#define V_REQ_LNF_RXSTATEDONE(x) ((x) << S_REQ_LNF_RXSTATEDONE) +#define F_REQ_LNF_RXSTATEDONE V_REQ_LNF_RXSTATEDONE(1U) + +#define S_REQ_LNF_RXSTATEREQ 22 +#define V_REQ_LNF_RXSTATEREQ(x) ((x) << S_REQ_LNF_RXSTATEREQ) +#define F_REQ_LNF_RXSTATEREQ V_REQ_LNF_RXSTATEREQ(1U) + +#define S_REQ_LNF_RXPWRSTATE 20 +#define M_REQ_LNF_RXPWRSTATE 0x3U +#define V_REQ_LNF_RXPWRSTATE(x) ((x) << S_REQ_LNF_RXPWRSTATE) +#define G_REQ_LNF_RXPWRSTATE(x) (((x) >> S_REQ_LNF_RXPWRSTATE) & M_REQ_LNF_RXPWRSTATE) + +#define S_REQ_LNE_RXSTATEDONE 19 +#define V_REQ_LNE_RXSTATEDONE(x) ((x) << S_REQ_LNE_RXSTATEDONE) +#define F_REQ_LNE_RXSTATEDONE V_REQ_LNE_RXSTATEDONE(1U) + +#define S_REQ_LNE_RXSTATEREQ 18 +#define V_REQ_LNE_RXSTATEREQ(x) ((x) << S_REQ_LNE_RXSTATEREQ) +#define F_REQ_LNE_RXSTATEREQ V_REQ_LNE_RXSTATEREQ(1U) + +#define S_REQ_LNE_RXPWRSTATE 16 +#define M_REQ_LNE_RXPWRSTATE 0x3U +#define V_REQ_LNE_RXPWRSTATE(x) ((x) << S_REQ_LNE_RXPWRSTATE) +#define G_REQ_LNE_RXPWRSTATE(x) (((x) >> S_REQ_LNE_RXPWRSTATE) & M_REQ_LNE_RXPWRSTATE) + +#define S_REQ_LND_RXSTATEDONE 15 +#define V_REQ_LND_RXSTATEDONE(x) ((x) << S_REQ_LND_RXSTATEDONE) +#define F_REQ_LND_RXSTATEDONE V_REQ_LND_RXSTATEDONE(1U) + +#define S_REQ_LND_RXSTATEREQ 14 +#define V_REQ_LND_RXSTATEREQ(x) ((x) << S_REQ_LND_RXSTATEREQ) +#define F_REQ_LND_RXSTATEREQ V_REQ_LND_RXSTATEREQ(1U) + +#define S_REQ_LND_RXPWRSTATE 12 +#define M_REQ_LND_RXPWRSTATE 0x3U +#define V_REQ_LND_RXPWRSTATE(x) ((x) << S_REQ_LND_RXPWRSTATE) +#define G_REQ_LND_RXPWRSTATE(x) (((x) >> S_REQ_LND_RXPWRSTATE) & M_REQ_LND_RXPWRSTATE) + +#define S_REQ_LNC_RXSTATEDONE 11 +#define V_REQ_LNC_RXSTATEDONE(x) ((x) << S_REQ_LNC_RXSTATEDONE) +#define F_REQ_LNC_RXSTATEDONE 
V_REQ_LNC_RXSTATEDONE(1U) + +#define S_REQ_LNC_RXSTATEREQ 10 +#define V_REQ_LNC_RXSTATEREQ(x) ((x) << S_REQ_LNC_RXSTATEREQ) +#define F_REQ_LNC_RXSTATEREQ V_REQ_LNC_RXSTATEREQ(1U) + +#define S_REQ_LNC_RXPWRSTATE 8 +#define M_REQ_LNC_RXPWRSTATE 0x3U +#define V_REQ_LNC_RXPWRSTATE(x) ((x) << S_REQ_LNC_RXPWRSTATE) +#define G_REQ_LNC_RXPWRSTATE(x) (((x) >> S_REQ_LNC_RXPWRSTATE) & M_REQ_LNC_RXPWRSTATE) + +#define S_REQ_LNB_RXSTATEDONE 7 +#define V_REQ_LNB_RXSTATEDONE(x) ((x) << S_REQ_LNB_RXSTATEDONE) +#define F_REQ_LNB_RXSTATEDONE V_REQ_LNB_RXSTATEDONE(1U) + +#define S_REQ_LNB_RXSTATEREQ 6 +#define V_REQ_LNB_RXSTATEREQ(x) ((x) << S_REQ_LNB_RXSTATEREQ) +#define F_REQ_LNB_RXSTATEREQ V_REQ_LNB_RXSTATEREQ(1U) + +#define S_REQ_LNB_RXPWRSTATE 4 +#define M_REQ_LNB_RXPWRSTATE 0x3U +#define V_REQ_LNB_RXPWRSTATE(x) ((x) << S_REQ_LNB_RXPWRSTATE) +#define G_REQ_LNB_RXPWRSTATE(x) (((x) >> S_REQ_LNB_RXPWRSTATE) & M_REQ_LNB_RXPWRSTATE) + +#define S_REQ_LNA_RXSTATEDONE 3 +#define V_REQ_LNA_RXSTATEDONE(x) ((x) << S_REQ_LNA_RXSTATEDONE) +#define F_REQ_LNA_RXSTATEDONE V_REQ_LNA_RXSTATEDONE(1U) + +#define S_REQ_LNA_RXSTATEREQ 2 +#define V_REQ_LNA_RXSTATEREQ(x) ((x) << S_REQ_LNA_RXSTATEREQ) +#define F_REQ_LNA_RXSTATEREQ V_REQ_LNA_RXSTATEREQ(1U) + +#define S_REQ_LNA_RXPWRSTATE 0 +#define M_REQ_LNA_RXPWRSTATE 0x3U +#define V_REQ_LNA_RXPWRSTATE(x) ((x) << S_REQ_LNA_RXPWRSTATE) +#define G_REQ_LNA_RXPWRSTATE(x) (((x) >> S_REQ_LNA_RXPWRSTATE) & M_REQ_LNA_RXPWRSTATE) + #define A_PCIE_PHY_CURRXPWR 0x5ba4 + +#define S_T5_LNH_RXPWRSTATE 28 +#define M_T5_LNH_RXPWRSTATE 0x7U +#define V_T5_LNH_RXPWRSTATE(x) ((x) << S_T5_LNH_RXPWRSTATE) +#define G_T5_LNH_RXPWRSTATE(x) (((x) >> S_T5_LNH_RXPWRSTATE) & M_T5_LNH_RXPWRSTATE) + +#define S_T5_LNG_RXPWRSTATE 24 +#define M_T5_LNG_RXPWRSTATE 0x7U +#define V_T5_LNG_RXPWRSTATE(x) ((x) << S_T5_LNG_RXPWRSTATE) +#define G_T5_LNG_RXPWRSTATE(x) (((x) >> S_T5_LNG_RXPWRSTATE) & M_T5_LNG_RXPWRSTATE) + +#define S_T5_LNF_RXPWRSTATE 20 +#define M_T5_LNF_RXPWRSTATE 0x7U +#define 
V_T5_LNF_RXPWRSTATE(x) ((x) << S_T5_LNF_RXPWRSTATE) +#define G_T5_LNF_RXPWRSTATE(x) (((x) >> S_T5_LNF_RXPWRSTATE) & M_T5_LNF_RXPWRSTATE) + +#define S_T5_LNE_RXPWRSTATE 16 +#define M_T5_LNE_RXPWRSTATE 0x7U +#define V_T5_LNE_RXPWRSTATE(x) ((x) << S_T5_LNE_RXPWRSTATE) +#define G_T5_LNE_RXPWRSTATE(x) (((x) >> S_T5_LNE_RXPWRSTATE) & M_T5_LNE_RXPWRSTATE) + +#define S_T5_LND_RXPWRSTATE 12 +#define M_T5_LND_RXPWRSTATE 0x7U +#define V_T5_LND_RXPWRSTATE(x) ((x) << S_T5_LND_RXPWRSTATE) +#define G_T5_LND_RXPWRSTATE(x) (((x) >> S_T5_LND_RXPWRSTATE) & M_T5_LND_RXPWRSTATE) + +#define S_T5_LNC_RXPWRSTATE 8 +#define M_T5_LNC_RXPWRSTATE 0x7U +#define V_T5_LNC_RXPWRSTATE(x) ((x) << S_T5_LNC_RXPWRSTATE) +#define G_T5_LNC_RXPWRSTATE(x) (((x) >> S_T5_LNC_RXPWRSTATE) & M_T5_LNC_RXPWRSTATE) + +#define S_T5_LNB_RXPWRSTATE 4 +#define M_T5_LNB_RXPWRSTATE 0x7U +#define V_T5_LNB_RXPWRSTATE(x) ((x) << S_T5_LNB_RXPWRSTATE) +#define G_T5_LNB_RXPWRSTATE(x) (((x) >> S_T5_LNB_RXPWRSTATE) & M_T5_LNB_RXPWRSTATE) + +#define S_T5_LNA_RXPWRSTATE 0 +#define M_T5_LNA_RXPWRSTATE 0x7U +#define V_T5_LNA_RXPWRSTATE(x) ((x) << S_T5_LNA_RXPWRSTATE) +#define G_T5_LNA_RXPWRSTATE(x) (((x) >> S_T5_LNA_RXPWRSTATE) & M_T5_LNA_RXPWRSTATE) + +#define S_CUR_LNH_RXPWRSTATE 28 +#define M_CUR_LNH_RXPWRSTATE 0x7U +#define V_CUR_LNH_RXPWRSTATE(x) ((x) << S_CUR_LNH_RXPWRSTATE) +#define G_CUR_LNH_RXPWRSTATE(x) (((x) >> S_CUR_LNH_RXPWRSTATE) & M_CUR_LNH_RXPWRSTATE) + +#define S_CUR_LNG_RXPWRSTATE 24 +#define M_CUR_LNG_RXPWRSTATE 0x7U +#define V_CUR_LNG_RXPWRSTATE(x) ((x) << S_CUR_LNG_RXPWRSTATE) +#define G_CUR_LNG_RXPWRSTATE(x) (((x) >> S_CUR_LNG_RXPWRSTATE) & M_CUR_LNG_RXPWRSTATE) + +#define S_CUR_LNF_RXPWRSTATE 20 +#define M_CUR_LNF_RXPWRSTATE 0x7U +#define V_CUR_LNF_RXPWRSTATE(x) ((x) << S_CUR_LNF_RXPWRSTATE) +#define G_CUR_LNF_RXPWRSTATE(x) (((x) >> S_CUR_LNF_RXPWRSTATE) & M_CUR_LNF_RXPWRSTATE) + +#define S_CUR_LNE_RXPWRSTATE 16 +#define M_CUR_LNE_RXPWRSTATE 0x7U +#define V_CUR_LNE_RXPWRSTATE(x) ((x) << S_CUR_LNE_RXPWRSTATE) 
+#define G_CUR_LNE_RXPWRSTATE(x) (((x) >> S_CUR_LNE_RXPWRSTATE) & M_CUR_LNE_RXPWRSTATE) + +#define S_CUR_LND_RXPWRSTATE 12 +#define M_CUR_LND_RXPWRSTATE 0x7U +#define V_CUR_LND_RXPWRSTATE(x) ((x) << S_CUR_LND_RXPWRSTATE) +#define G_CUR_LND_RXPWRSTATE(x) (((x) >> S_CUR_LND_RXPWRSTATE) & M_CUR_LND_RXPWRSTATE) + +#define S_CUR_LNC_RXPWRSTATE 8 +#define M_CUR_LNC_RXPWRSTATE 0x7U +#define V_CUR_LNC_RXPWRSTATE(x) ((x) << S_CUR_LNC_RXPWRSTATE) +#define G_CUR_LNC_RXPWRSTATE(x) (((x) >> S_CUR_LNC_RXPWRSTATE) & M_CUR_LNC_RXPWRSTATE) + +#define S_CUR_LNB_RXPWRSTATE 4 +#define M_CUR_LNB_RXPWRSTATE 0x7U +#define V_CUR_LNB_RXPWRSTATE(x) ((x) << S_CUR_LNB_RXPWRSTATE) +#define G_CUR_LNB_RXPWRSTATE(x) (((x) >> S_CUR_LNB_RXPWRSTATE) & M_CUR_LNB_RXPWRSTATE) + +#define S_CUR_LNA_RXPWRSTATE 0 +#define M_CUR_LNA_RXPWRSTATE 0x7U +#define V_CUR_LNA_RXPWRSTATE(x) ((x) << S_CUR_LNA_RXPWRSTATE) +#define G_CUR_LNA_RXPWRSTATE(x) (((x) >> S_CUR_LNA_RXPWRSTATE) & M_CUR_LNA_RXPWRSTATE) + #define A_PCIE_PHY_GEN3_AE0 0x5ba8 #define S_LND_STAT 28 @@ -7086,6 +8442,11 @@ #define V_COEFFSTART(x) ((x) << S_COEFFSTART) #define F_COEFFSTART V_COEFFSTART(1U) +#define S_T6_COEFFLANE 8 +#define M_T6_COEFFLANE 0xfU +#define V_T6_COEFFLANE(x) ((x) << S_T6_COEFFLANE) +#define G_T6_COEFFLANE(x) (((x) >> S_T6_COEFFLANE) & M_T6_COEFFLANE) + #define A_PCIE_PHY_PRESET_COEFF 0x5bc4 #define S_COEFF 0 @@ -7107,6 +8468,3026 @@ #define A_PCIE_PHY_INDIR_DATA 0x5bf4 #define A_PCIE_STATIC_SPARE1 0x5bf8 #define A_PCIE_STATIC_SPARE2 0x5bfc +#define A_PCIE_KDOORBELL_GTS_PF_BASE_LEN 0x5c10 + +#define S_KDB_PF_LEN 24 +#define M_KDB_PF_LEN 0x1fU +#define V_KDB_PF_LEN(x) ((x) << S_KDB_PF_LEN) +#define G_KDB_PF_LEN(x) (((x) >> S_KDB_PF_LEN) & M_KDB_PF_LEN) + +#define S_KDB_PF_BASEADDR 0 +#define M_KDB_PF_BASEADDR 0xfffffU +#define V_KDB_PF_BASEADDR(x) ((x) << S_KDB_PF_BASEADDR) +#define G_KDB_PF_BASEADDR(x) (((x) >> S_KDB_PF_BASEADDR) & M_KDB_PF_BASEADDR) + +#define A_PCIE_KDOORBELL_GTS_VF_BASE_LEN 0x5c14 + +#define S_KDB_VF_LEN 24 
+#define M_KDB_VF_LEN 0x1fU +#define V_KDB_VF_LEN(x) ((x) << S_KDB_VF_LEN) +#define G_KDB_VF_LEN(x) (((x) >> S_KDB_VF_LEN) & M_KDB_VF_LEN) + +#define S_KDB_VF_BASEADDR 0 +#define M_KDB_VF_BASEADDR 0xfffffU +#define V_KDB_VF_BASEADDR(x) ((x) << S_KDB_VF_BASEADDR) +#define G_KDB_VF_BASEADDR(x) (((x) >> S_KDB_VF_BASEADDR) & M_KDB_VF_BASEADDR) + +#define A_PCIE_KDOORBELL_GTS_VF_OFFSET 0x5c18 + +#define S_KDB_VF_MODOFST 0 +#define M_KDB_VF_MODOFST 0xfffU +#define V_KDB_VF_MODOFST(x) ((x) << S_KDB_VF_MODOFST) +#define G_KDB_VF_MODOFST(x) (((x) >> S_KDB_VF_MODOFST) & M_KDB_VF_MODOFST) + +#define A_PCIE_PHY_REQRXPWR1 0x5c1c + +#define S_REQ_LNP_RXSTATEDONE 31 +#define V_REQ_LNP_RXSTATEDONE(x) ((x) << S_REQ_LNP_RXSTATEDONE) +#define F_REQ_LNP_RXSTATEDONE V_REQ_LNP_RXSTATEDONE(1U) + +#define S_REQ_LNP_RXSTATEREQ 30 +#define V_REQ_LNP_RXSTATEREQ(x) ((x) << S_REQ_LNP_RXSTATEREQ) +#define F_REQ_LNP_RXSTATEREQ V_REQ_LNP_RXSTATEREQ(1U) + +#define S_REQ_LNP_RXPWRSTATE 28 +#define M_REQ_LNP_RXPWRSTATE 0x3U +#define V_REQ_LNP_RXPWRSTATE(x) ((x) << S_REQ_LNP_RXPWRSTATE) +#define G_REQ_LNP_RXPWRSTATE(x) (((x) >> S_REQ_LNP_RXPWRSTATE) & M_REQ_LNP_RXPWRSTATE) + +#define S_REQ_LNO_RXSTATEDONE 27 +#define V_REQ_LNO_RXSTATEDONE(x) ((x) << S_REQ_LNO_RXSTATEDONE) +#define F_REQ_LNO_RXSTATEDONE V_REQ_LNO_RXSTATEDONE(1U) + +#define S_REQ_LNO_RXSTATEREQ 26 +#define V_REQ_LNO_RXSTATEREQ(x) ((x) << S_REQ_LNO_RXSTATEREQ) +#define F_REQ_LNO_RXSTATEREQ V_REQ_LNO_RXSTATEREQ(1U) + +#define S_REQ_LNO_RXPWRSTATE 24 +#define M_REQ_LNO_RXPWRSTATE 0x3U +#define V_REQ_LNO_RXPWRSTATE(x) ((x) << S_REQ_LNO_RXPWRSTATE) +#define G_REQ_LNO_RXPWRSTATE(x) (((x) >> S_REQ_LNO_RXPWRSTATE) & M_REQ_LNO_RXPWRSTATE) + +#define S_REQ_LNN_RXSTATEDONE 23 +#define V_REQ_LNN_RXSTATEDONE(x) ((x) << S_REQ_LNN_RXSTATEDONE) +#define F_REQ_LNN_RXSTATEDONE V_REQ_LNN_RXSTATEDONE(1U) + +#define S_REQ_LNN_RXSTATEREQ 22 +#define V_REQ_LNN_RXSTATEREQ(x) ((x) << S_REQ_LNN_RXSTATEREQ) +#define F_REQ_LNN_RXSTATEREQ V_REQ_LNN_RXSTATEREQ(1U) 
+ +#define S_REQ_LNN_RXPWRSTATE 20 +#define M_REQ_LNN_RXPWRSTATE 0x3U +#define V_REQ_LNN_RXPWRSTATE(x) ((x) << S_REQ_LNN_RXPWRSTATE) +#define G_REQ_LNN_RXPWRSTATE(x) (((x) >> S_REQ_LNN_RXPWRSTATE) & M_REQ_LNN_RXPWRSTATE) + +#define S_REQ_LNM_RXSTATEDONE 19 +#define V_REQ_LNM_RXSTATEDONE(x) ((x) << S_REQ_LNM_RXSTATEDONE) +#define F_REQ_LNM_RXSTATEDONE V_REQ_LNM_RXSTATEDONE(1U) + +#define S_REQ_LNM_RXSTATEREQ 18 +#define V_REQ_LNM_RXSTATEREQ(x) ((x) << S_REQ_LNM_RXSTATEREQ) +#define F_REQ_LNM_RXSTATEREQ V_REQ_LNM_RXSTATEREQ(1U) + +#define S_REQ_LNM_RXPWRSTATE 16 +#define M_REQ_LNM_RXPWRSTATE 0x3U +#define V_REQ_LNM_RXPWRSTATE(x) ((x) << S_REQ_LNM_RXPWRSTATE) +#define G_REQ_LNM_RXPWRSTATE(x) (((x) >> S_REQ_LNM_RXPWRSTATE) & M_REQ_LNM_RXPWRSTATE) + +#define S_REQ_LNL_RXSTATEDONE 15 +#define V_REQ_LNL_RXSTATEDONE(x) ((x) << S_REQ_LNL_RXSTATEDONE) +#define F_REQ_LNL_RXSTATEDONE V_REQ_LNL_RXSTATEDONE(1U) + +#define S_REQ_LNL_RXSTATEREQ 14 +#define V_REQ_LNL_RXSTATEREQ(x) ((x) << S_REQ_LNL_RXSTATEREQ) +#define F_REQ_LNL_RXSTATEREQ V_REQ_LNL_RXSTATEREQ(1U) + +#define S_REQ_LNL_RXPWRSTATE 12 +#define M_REQ_LNL_RXPWRSTATE 0x3U +#define V_REQ_LNL_RXPWRSTATE(x) ((x) << S_REQ_LNL_RXPWRSTATE) +#define G_REQ_LNL_RXPWRSTATE(x) (((x) >> S_REQ_LNL_RXPWRSTATE) & M_REQ_LNL_RXPWRSTATE) + +#define S_REQ_LNK_RXSTATEDONE 11 +#define V_REQ_LNK_RXSTATEDONE(x) ((x) << S_REQ_LNK_RXSTATEDONE) +#define F_REQ_LNK_RXSTATEDONE V_REQ_LNK_RXSTATEDONE(1U) + +#define S_REQ_LNK_RXSTATEREQ 10 +#define V_REQ_LNK_RXSTATEREQ(x) ((x) << S_REQ_LNK_RXSTATEREQ) +#define F_REQ_LNK_RXSTATEREQ V_REQ_LNK_RXSTATEREQ(1U) + +#define S_REQ_LNK_RXPWRSTATE 8 +#define M_REQ_LNK_RXPWRSTATE 0x3U +#define V_REQ_LNK_RXPWRSTATE(x) ((x) << S_REQ_LNK_RXPWRSTATE) +#define G_REQ_LNK_RXPWRSTATE(x) (((x) >> S_REQ_LNK_RXPWRSTATE) & M_REQ_LNK_RXPWRSTATE) + +#define S_REQ_LNJ_RXSTATEDONE 7 +#define V_REQ_LNJ_RXSTATEDONE(x) ((x) << S_REQ_LNJ_RXSTATEDONE) +#define F_REQ_LNJ_RXSTATEDONE V_REQ_LNJ_RXSTATEDONE(1U) + +#define 
S_REQ_LNJ_RXSTATEREQ 6 +#define V_REQ_LNJ_RXSTATEREQ(x) ((x) << S_REQ_LNJ_RXSTATEREQ) +#define F_REQ_LNJ_RXSTATEREQ V_REQ_LNJ_RXSTATEREQ(1U) + +#define S_REQ_LNJ_RXPWRSTATE 4 +#define M_REQ_LNJ_RXPWRSTATE 0x3U +#define V_REQ_LNJ_RXPWRSTATE(x) ((x) << S_REQ_LNJ_RXPWRSTATE) +#define G_REQ_LNJ_RXPWRSTATE(x) (((x) >> S_REQ_LNJ_RXPWRSTATE) & M_REQ_LNJ_RXPWRSTATE) + +#define S_REQ_LNI_RXSTATEDONE 3 +#define V_REQ_LNI_RXSTATEDONE(x) ((x) << S_REQ_LNI_RXSTATEDONE) +#define F_REQ_LNI_RXSTATEDONE V_REQ_LNI_RXSTATEDONE(1U) + +#define S_REQ_LNI_RXSTATEREQ 2 +#define V_REQ_LNI_RXSTATEREQ(x) ((x) << S_REQ_LNI_RXSTATEREQ) +#define F_REQ_LNI_RXSTATEREQ V_REQ_LNI_RXSTATEREQ(1U) + +#define S_REQ_LNI_RXPWRSTATE 0 +#define M_REQ_LNI_RXPWRSTATE 0x3U +#define V_REQ_LNI_RXPWRSTATE(x) ((x) << S_REQ_LNI_RXPWRSTATE) +#define G_REQ_LNI_RXPWRSTATE(x) (((x) >> S_REQ_LNI_RXPWRSTATE) & M_REQ_LNI_RXPWRSTATE) + +#define A_PCIE_PHY_CURRXPWR1 0x5c20 + +#define S_CUR_LNP_RXPWRSTATE 28 +#define M_CUR_LNP_RXPWRSTATE 0x7U +#define V_CUR_LNP_RXPWRSTATE(x) ((x) << S_CUR_LNP_RXPWRSTATE) +#define G_CUR_LNP_RXPWRSTATE(x) (((x) >> S_CUR_LNP_RXPWRSTATE) & M_CUR_LNP_RXPWRSTATE) + +#define S_CUR_LNO_RXPWRSTATE 24 +#define M_CUR_LNO_RXPWRSTATE 0x7U +#define V_CUR_LNO_RXPWRSTATE(x) ((x) << S_CUR_LNO_RXPWRSTATE) +#define G_CUR_LNO_RXPWRSTATE(x) (((x) >> S_CUR_LNO_RXPWRSTATE) & M_CUR_LNO_RXPWRSTATE) + +#define S_CUR_LNN_RXPWRSTATE 20 +#define M_CUR_LNN_RXPWRSTATE 0x7U +#define V_CUR_LNN_RXPWRSTATE(x) ((x) << S_CUR_LNN_RXPWRSTATE) +#define G_CUR_LNN_RXPWRSTATE(x) (((x) >> S_CUR_LNN_RXPWRSTATE) & M_CUR_LNN_RXPWRSTATE) + +#define S_CUR_LNM_RXPWRSTATE 16 +#define M_CUR_LNM_RXPWRSTATE 0x7U +#define V_CUR_LNM_RXPWRSTATE(x) ((x) << S_CUR_LNM_RXPWRSTATE) +#define G_CUR_LNM_RXPWRSTATE(x) (((x) >> S_CUR_LNM_RXPWRSTATE) & M_CUR_LNM_RXPWRSTATE) + +#define S_CUR_LNL_RXPWRSTATE 12 +#define M_CUR_LNL_RXPWRSTATE 0x7U +#define V_CUR_LNL_RXPWRSTATE(x) ((x) << S_CUR_LNL_RXPWRSTATE) +#define G_CUR_LNL_RXPWRSTATE(x) (((x) >> 
S_CUR_LNL_RXPWRSTATE) & M_CUR_LNL_RXPWRSTATE) + +#define S_CUR_LNK_RXPWRSTATE 8 +#define M_CUR_LNK_RXPWRSTATE 0x7U +#define V_CUR_LNK_RXPWRSTATE(x) ((x) << S_CUR_LNK_RXPWRSTATE) +#define G_CUR_LNK_RXPWRSTATE(x) (((x) >> S_CUR_LNK_RXPWRSTATE) & M_CUR_LNK_RXPWRSTATE) + +#define S_CUR_LNJ_RXPWRSTATE 4 +#define M_CUR_LNJ_RXPWRSTATE 0x7U +#define V_CUR_LNJ_RXPWRSTATE(x) ((x) << S_CUR_LNJ_RXPWRSTATE) +#define G_CUR_LNJ_RXPWRSTATE(x) (((x) >> S_CUR_LNJ_RXPWRSTATE) & M_CUR_LNJ_RXPWRSTATE) + +#define S_CUR_LNI_RXPWRSTATE 0 +#define M_CUR_LNI_RXPWRSTATE 0x7U +#define V_CUR_LNI_RXPWRSTATE(x) ((x) << S_CUR_LNI_RXPWRSTATE) +#define G_CUR_LNI_RXPWRSTATE(x) (((x) >> S_CUR_LNI_RXPWRSTATE) & M_CUR_LNI_RXPWRSTATE) + +#define A_PCIE_PHY_GEN3_AE2 0x5c24 + +#define S_LNL_STAT 28 +#define M_LNL_STAT 0x7U +#define V_LNL_STAT(x) ((x) << S_LNL_STAT) +#define G_LNL_STAT(x) (((x) >> S_LNL_STAT) & M_LNL_STAT) + +#define S_LNL_CMD 24 +#define M_LNL_CMD 0x7U +#define V_LNL_CMD(x) ((x) << S_LNL_CMD) +#define G_LNL_CMD(x) (((x) >> S_LNL_CMD) & M_LNL_CMD) + +#define S_LNK_STAT 20 +#define M_LNK_STAT 0x7U +#define V_LNK_STAT(x) ((x) << S_LNK_STAT) +#define G_LNK_STAT(x) (((x) >> S_LNK_STAT) & M_LNK_STAT) + +#define S_LNK_CMD 16 +#define M_LNK_CMD 0x7U +#define V_LNK_CMD(x) ((x) << S_LNK_CMD) +#define G_LNK_CMD(x) (((x) >> S_LNK_CMD) & M_LNK_CMD) + +#define S_LNJ_STAT 12 +#define M_LNJ_STAT 0x7U +#define V_LNJ_STAT(x) ((x) << S_LNJ_STAT) +#define G_LNJ_STAT(x) (((x) >> S_LNJ_STAT) & M_LNJ_STAT) + +#define S_LNJ_CMD 8 +#define M_LNJ_CMD 0x7U +#define V_LNJ_CMD(x) ((x) << S_LNJ_CMD) +#define G_LNJ_CMD(x) (((x) >> S_LNJ_CMD) & M_LNJ_CMD) + +#define S_LNI_STAT 4 +#define M_LNI_STAT 0x7U +#define V_LNI_STAT(x) ((x) << S_LNI_STAT) +#define G_LNI_STAT(x) (((x) >> S_LNI_STAT) & M_LNI_STAT) + +#define S_LNI_CMD 0 +#define M_LNI_CMD 0x7U +#define V_LNI_CMD(x) ((x) << S_LNI_CMD) +#define G_LNI_CMD(x) (((x) >> S_LNI_CMD) & M_LNI_CMD) + +#define A_PCIE_PHY_GEN3_AE3 0x5c28 + +#define S_LNP_STAT 28 +#define 
M_LNP_STAT 0x7U +#define V_LNP_STAT(x) ((x) << S_LNP_STAT) +#define G_LNP_STAT(x) (((x) >> S_LNP_STAT) & M_LNP_STAT) + +#define S_LNP_CMD 24 +#define M_LNP_CMD 0x7U +#define V_LNP_CMD(x) ((x) << S_LNP_CMD) +#define G_LNP_CMD(x) (((x) >> S_LNP_CMD) & M_LNP_CMD) + +#define S_LNO_STAT 20 +#define M_LNO_STAT 0x7U +#define V_LNO_STAT(x) ((x) << S_LNO_STAT) +#define G_LNO_STAT(x) (((x) >> S_LNO_STAT) & M_LNO_STAT) + +#define S_LNO_CMD 16 +#define M_LNO_CMD 0x7U +#define V_LNO_CMD(x) ((x) << S_LNO_CMD) +#define G_LNO_CMD(x) (((x) >> S_LNO_CMD) & M_LNO_CMD) + +#define S_LNN_STAT 12 +#define M_LNN_STAT 0x7U +#define V_LNN_STAT(x) ((x) << S_LNN_STAT) +#define G_LNN_STAT(x) (((x) >> S_LNN_STAT) & M_LNN_STAT) + +#define S_LNN_CMD 8 +#define M_LNN_CMD 0x7U +#define V_LNN_CMD(x) ((x) << S_LNN_CMD) +#define G_LNN_CMD(x) (((x) >> S_LNN_CMD) & M_LNN_CMD) + +#define S_LNM_STAT 4 +#define M_LNM_STAT 0x7U +#define V_LNM_STAT(x) ((x) << S_LNM_STAT) +#define G_LNM_STAT(x) (((x) >> S_LNM_STAT) & M_LNM_STAT) + +#define S_LNM_CMD 0 +#define M_LNM_CMD 0x7U +#define V_LNM_CMD(x) ((x) << S_LNM_CMD) +#define G_LNM_CMD(x) (((x) >> S_LNM_CMD) & M_LNM_CMD) + +#define A_PCIE_PHY_FS_LF4 0x5c2c + +#define S_LANE9LF 24 +#define M_LANE9LF 0x3fU +#define V_LANE9LF(x) ((x) << S_LANE9LF) +#define G_LANE9LF(x) (((x) >> S_LANE9LF) & M_LANE9LF) + +#define S_LANE9FS 16 +#define M_LANE9FS 0x3fU +#define V_LANE9FS(x) ((x) << S_LANE9FS) +#define G_LANE9FS(x) (((x) >> S_LANE9FS) & M_LANE9FS) + +#define S_LANE8LF 8 +#define M_LANE8LF 0x3fU +#define V_LANE8LF(x) ((x) << S_LANE8LF) +#define G_LANE8LF(x) (((x) >> S_LANE8LF) & M_LANE8LF) + +#define S_LANE8FS 0 +#define M_LANE8FS 0x3fU +#define V_LANE8FS(x) ((x) << S_LANE8FS) +#define G_LANE8FS(x) (((x) >> S_LANE8FS) & M_LANE8FS) + +#define A_PCIE_PHY_FS_LF5 0x5c30 + +#define S_LANE11LF 24 +#define M_LANE11LF 0x3fU +#define V_LANE11LF(x) ((x) << S_LANE11LF) +#define G_LANE11LF(x) (((x) >> S_LANE11LF) & M_LANE11LF) + +#define S_LANE11FS 16 +#define M_LANE11FS 0x3fU 
+#define V_LANE11FS(x) ((x) << S_LANE11FS) +#define G_LANE11FS(x) (((x) >> S_LANE11FS) & M_LANE11FS) + +#define S_LANE10LF 8 +#define M_LANE10LF 0x3fU +#define V_LANE10LF(x) ((x) << S_LANE10LF) +#define G_LANE10LF(x) (((x) >> S_LANE10LF) & M_LANE10LF) + +#define S_LANE10FS 0 +#define M_LANE10FS 0x3fU +#define V_LANE10FS(x) ((x) << S_LANE10FS) +#define G_LANE10FS(x) (((x) >> S_LANE10FS) & M_LANE10FS) + +#define A_PCIE_PHY_FS_LF6 0x5c34 + +#define S_LANE13LF 24 +#define M_LANE13LF 0x3fU +#define V_LANE13LF(x) ((x) << S_LANE13LF) +#define G_LANE13LF(x) (((x) >> S_LANE13LF) & M_LANE13LF) + +#define S_LANE13FS 16 +#define M_LANE13FS 0x3fU +#define V_LANE13FS(x) ((x) << S_LANE13FS) +#define G_LANE13FS(x) (((x) >> S_LANE13FS) & M_LANE13FS) + +#define S_LANE12LF 8 +#define M_LANE12LF 0x3fU +#define V_LANE12LF(x) ((x) << S_LANE12LF) +#define G_LANE12LF(x) (((x) >> S_LANE12LF) & M_LANE12LF) + +#define S_LANE12FS 0 +#define M_LANE12FS 0x3fU +#define V_LANE12FS(x) ((x) << S_LANE12FS) +#define G_LANE12FS(x) (((x) >> S_LANE12FS) & M_LANE12FS) + +#define A_PCIE_PHY_FS_LF7 0x5c38 + +#define S_LANE15LF 24 +#define M_LANE15LF 0x3fU +#define V_LANE15LF(x) ((x) << S_LANE15LF) +#define G_LANE15LF(x) (((x) >> S_LANE15LF) & M_LANE15LF) + +#define S_LANE15FS 16 +#define M_LANE15FS 0x3fU +#define V_LANE15FS(x) ((x) << S_LANE15FS) +#define G_LANE15FS(x) (((x) >> S_LANE15FS) & M_LANE15FS) + +#define S_LANE14LF 8 +#define M_LANE14LF 0x3fU +#define V_LANE14LF(x) ((x) << S_LANE14LF) +#define G_LANE14LF(x) (((x) >> S_LANE14LF) & M_LANE14LF) + +#define S_LANE14FS 0 +#define M_LANE14FS 0x3fU +#define V_LANE14FS(x) ((x) << S_LANE14FS) +#define G_LANE14FS(x) (((x) >> S_LANE14FS) & M_LANE14FS) + +#define A_PCIE_MULTI_PHY_INDIR_REQ 0x5c3c + +#define S_PHY_REG_ENABLE 31 +#define V_PHY_REG_ENABLE(x) ((x) << S_PHY_REG_ENABLE) +#define F_PHY_REG_ENABLE V_PHY_REG_ENABLE(1U) + +#define S_PHY_REG_SELECT 22 +#define M_PHY_REG_SELECT 0x3U +#define V_PHY_REG_SELECT(x) ((x) << S_PHY_REG_SELECT) +#define 
G_PHY_REG_SELECT(x) (((x) >> S_PHY_REG_SELECT) & M_PHY_REG_SELECT) + +#define S_PHY_REG_REGADDR 0 +#define M_PHY_REG_REGADDR 0xffffU +#define V_PHY_REG_REGADDR(x) ((x) << S_PHY_REG_REGADDR) +#define G_PHY_REG_REGADDR(x) (((x) >> S_PHY_REG_REGADDR) & M_PHY_REG_REGADDR) + +#define A_PCIE_MULTI_PHY_INDIR_DATA 0x5c40 + +#define S_PHY_REG_DATA 0 +#define M_PHY_REG_DATA 0xffffU +#define V_PHY_REG_DATA(x) ((x) << S_PHY_REG_DATA) +#define G_PHY_REG_DATA(x) (((x) >> S_PHY_REG_DATA) & M_PHY_REG_DATA) + +#define A_PCIE_VF_INT_INDIR_REQ 0x5c44 + +#define S_ENABLE_VF 24 +#define V_ENABLE_VF(x) ((x) << S_ENABLE_VF) +#define F_ENABLE_VF V_ENABLE_VF(1U) + +#define S_AI_VF 23 +#define V_AI_VF(x) ((x) << S_AI_VF) +#define F_AI_VF V_AI_VF(1U) + +#define S_VFID_PCIE 0 +#define M_VFID_PCIE 0x3ffU +#define V_VFID_PCIE(x) ((x) << S_VFID_PCIE) +#define G_VFID_PCIE(x) (((x) >> S_VFID_PCIE) & M_VFID_PCIE) + +#define A_PCIE_VF_INT_INDIR_DATA 0x5c48 +#define A_PCIE_VF_256_INT_CFG2 0x5c4c +#define A_PCIE_VF_MSI_EN_4 0x5e50 +#define A_PCIE_VF_MSI_EN_5 0x5e54 +#define A_PCIE_VF_MSI_EN_6 0x5e58 +#define A_PCIE_VF_MSI_EN_7 0x5e5c +#define A_PCIE_VF_MSIX_EN_4 0x5e60 +#define A_PCIE_VF_MSIX_EN_5 0x5e64 +#define A_PCIE_VF_MSIX_EN_6 0x5e68 +#define A_PCIE_VF_MSIX_EN_7 0x5e6c +#define A_PCIE_FLR_VF4_STATUS 0x5e70 +#define A_PCIE_FLR_VF5_STATUS 0x5e74 +#define A_PCIE_FLR_VF6_STATUS 0x5e78 +#define A_PCIE_FLR_VF7_STATUS 0x5e7c +#define A_T6_PCIE_BUS_MST_STAT_4 0x5e80 +#define A_T6_PCIE_BUS_MST_STAT_5 0x5e84 +#define A_T6_PCIE_BUS_MST_STAT_6 0x5e88 +#define A_T6_PCIE_BUS_MST_STAT_7 0x5e8c +#define A_PCIE_BUS_MST_STAT_8 0x5e90 + +#define S_BUSMST_263_256 0 +#define M_BUSMST_263_256 0xffU +#define V_BUSMST_263_256(x) ((x) << S_BUSMST_263_256) +#define G_BUSMST_263_256(x) (((x) >> S_BUSMST_263_256) & M_BUSMST_263_256) + +#define A_PCIE_TGT_SKID_FIFO 0x5e94 + +#define S_HDRFREECNT 16 +#define M_HDRFREECNT 0xfffU +#define V_HDRFREECNT(x) ((x) << S_HDRFREECNT) +#define G_HDRFREECNT(x) (((x) >> S_HDRFREECNT) & 
M_HDRFREECNT) + +#define S_DATAFREECNT 0 +#define M_DATAFREECNT 0xfffU +#define V_DATAFREECNT(x) ((x) << S_DATAFREECNT) +#define G_DATAFREECNT(x) (((x) >> S_DATAFREECNT) & M_DATAFREECNT) + +#define A_T6_PCIE_RSP_ERR_STAT_4 0x5ea0 +#define A_T6_PCIE_RSP_ERR_STAT_5 0x5ea4 +#define A_T6_PCIE_RSP_ERR_STAT_6 0x5ea8 +#define A_T6_PCIE_RSP_ERR_STAT_7 0x5eac +#define A_PCIE_RSP_ERR_STAT_8 0x5eb0 + +#define S_RSPERR_263_256 0 +#define M_RSPERR_263_256 0xffU +#define V_RSPERR_263_256(x) ((x) << S_RSPERR_263_256) +#define G_RSPERR_263_256(x) (((x) >> S_RSPERR_263_256) & M_RSPERR_263_256) + +#define A_PCIE_PHY_STAT1 0x5ec0 + +#define S_PHY0_RTUNE_ACK 31 +#define V_PHY0_RTUNE_ACK(x) ((x) << S_PHY0_RTUNE_ACK) +#define F_PHY0_RTUNE_ACK V_PHY0_RTUNE_ACK(1U) + +#define S_PHY1_RTUNE_ACK 30 +#define V_PHY1_RTUNE_ACK(x) ((x) << S_PHY1_RTUNE_ACK) +#define F_PHY1_RTUNE_ACK V_PHY1_RTUNE_ACK(1U) + +#define A_PCIE_PHY_CTRL1 0x5ec4 + +#define S_PHY0_RTUNE_REQ 31 +#define V_PHY0_RTUNE_REQ(x) ((x) << S_PHY0_RTUNE_REQ) +#define F_PHY0_RTUNE_REQ V_PHY0_RTUNE_REQ(1U) + +#define S_PHY1_RTUNE_REQ 30 +#define V_PHY1_RTUNE_REQ(x) ((x) << S_PHY1_RTUNE_REQ) +#define F_PHY1_RTUNE_REQ V_PHY1_RTUNE_REQ(1U) + +#define S_TXDEEMPH_GEN1 16 +#define M_TXDEEMPH_GEN1 0xffU +#define V_TXDEEMPH_GEN1(x) ((x) << S_TXDEEMPH_GEN1) +#define G_TXDEEMPH_GEN1(x) (((x) >> S_TXDEEMPH_GEN1) & M_TXDEEMPH_GEN1) + +#define S_TXDEEMPH_GEN2_3P5DB 8 +#define M_TXDEEMPH_GEN2_3P5DB 0xffU +#define V_TXDEEMPH_GEN2_3P5DB(x) ((x) << S_TXDEEMPH_GEN2_3P5DB) +#define G_TXDEEMPH_GEN2_3P5DB(x) (((x) >> S_TXDEEMPH_GEN2_3P5DB) & M_TXDEEMPH_GEN2_3P5DB) + +#define S_TXDEEMPH_GEN2_6DB 0 +#define M_TXDEEMPH_GEN2_6DB 0xffU +#define V_TXDEEMPH_GEN2_6DB(x) ((x) << S_TXDEEMPH_GEN2_6DB) +#define G_TXDEEMPH_GEN2_6DB(x) (((x) >> S_TXDEEMPH_GEN2_6DB) & M_TXDEEMPH_GEN2_6DB) + +#define A_PCIE_PCIE_SPARE0 0x5ec8 +#define A_PCIE_RESET_STAT 0x5ecc + +#define S_PON_RST_STATE_FLAG 11 +#define V_PON_RST_STATE_FLAG(x) ((x) << S_PON_RST_STATE_FLAG) +#define 
F_PON_RST_STATE_FLAG V_PON_RST_STATE_FLAG(1U) + +#define S_BUS_RST_STATE_FLAG 10 +#define V_BUS_RST_STATE_FLAG(x) ((x) << S_BUS_RST_STATE_FLAG) +#define F_BUS_RST_STATE_FLAG V_BUS_RST_STATE_FLAG(1U) + +#define S_DL_DOWN_PCIECRST_MODE0_STATE_FLAG 9 +#define V_DL_DOWN_PCIECRST_MODE0_STATE_FLAG(x) ((x) << S_DL_DOWN_PCIECRST_MODE0_STATE_FLAG) +#define F_DL_DOWN_PCIECRST_MODE0_STATE_FLAG V_DL_DOWN_PCIECRST_MODE0_STATE_FLAG(1U) + +#define S_DL_DOWN_PCIECRST_MODE1_STATE_FLAG 8 +#define V_DL_DOWN_PCIECRST_MODE1_STATE_FLAG(x) ((x) << S_DL_DOWN_PCIECRST_MODE1_STATE_FLAG) +#define F_DL_DOWN_PCIECRST_MODE1_STATE_FLAG V_DL_DOWN_PCIECRST_MODE1_STATE_FLAG(1U) + +#define S_PCIE_WARM_RST_MODE0_STATE_FLAG 7 +#define V_PCIE_WARM_RST_MODE0_STATE_FLAG(x) ((x) << S_PCIE_WARM_RST_MODE0_STATE_FLAG) +#define F_PCIE_WARM_RST_MODE0_STATE_FLAG V_PCIE_WARM_RST_MODE0_STATE_FLAG(1U) + +#define S_PCIE_WARM_RST_MODE1_STATE_FLAG 6 +#define V_PCIE_WARM_RST_MODE1_STATE_FLAG(x) ((x) << S_PCIE_WARM_RST_MODE1_STATE_FLAG) +#define F_PCIE_WARM_RST_MODE1_STATE_FLAG V_PCIE_WARM_RST_MODE1_STATE_FLAG(1U) + +#define S_PIO_WARM_RST_MODE0_STATE_FLAG 5 +#define V_PIO_WARM_RST_MODE0_STATE_FLAG(x) ((x) << S_PIO_WARM_RST_MODE0_STATE_FLAG) +#define F_PIO_WARM_RST_MODE0_STATE_FLAG V_PIO_WARM_RST_MODE0_STATE_FLAG(1U) + +#define S_PIO_WARM_RST_MODE1_STATE_FLAG 4 +#define V_PIO_WARM_RST_MODE1_STATE_FLAG(x) ((x) << S_PIO_WARM_RST_MODE1_STATE_FLAG) +#define F_PIO_WARM_RST_MODE1_STATE_FLAG V_PIO_WARM_RST_MODE1_STATE_FLAG(1U) + +#define S_LASTRESETSTATE 0 +#define M_LASTRESETSTATE 0x7U +#define V_LASTRESETSTATE(x) ((x) << S_LASTRESETSTATE) +#define G_LASTRESETSTATE(x) (((x) >> S_LASTRESETSTATE) & M_LASTRESETSTATE) + +#define A_PCIE_FUNC_DSTATE 0x5ed0 + +#define S_PF7_DSTATE 21 +#define M_PF7_DSTATE 0x7U +#define V_PF7_DSTATE(x) ((x) << S_PF7_DSTATE) +#define G_PF7_DSTATE(x) (((x) >> S_PF7_DSTATE) & M_PF7_DSTATE) + +#define S_PF6_DSTATE 18 +#define M_PF6_DSTATE 0x7U +#define V_PF6_DSTATE(x) ((x) << S_PF6_DSTATE) +#define 
G_PF6_DSTATE(x) (((x) >> S_PF6_DSTATE) & M_PF6_DSTATE) + +#define S_PF5_DSTATE 15 +#define M_PF5_DSTATE 0x7U +#define V_PF5_DSTATE(x) ((x) << S_PF5_DSTATE) +#define G_PF5_DSTATE(x) (((x) >> S_PF5_DSTATE) & M_PF5_DSTATE) + +#define S_PF4_DSTATE 12 +#define M_PF4_DSTATE 0x7U +#define V_PF4_DSTATE(x) ((x) << S_PF4_DSTATE) +#define G_PF4_DSTATE(x) (((x) >> S_PF4_DSTATE) & M_PF4_DSTATE) + +#define S_PF3_DSTATE 9 +#define M_PF3_DSTATE 0x7U +#define V_PF3_DSTATE(x) ((x) << S_PF3_DSTATE) +#define G_PF3_DSTATE(x) (((x) >> S_PF3_DSTATE) & M_PF3_DSTATE) + +#define S_PF2_DSTATE 6 +#define M_PF2_DSTATE 0x7U +#define V_PF2_DSTATE(x) ((x) << S_PF2_DSTATE) +#define G_PF2_DSTATE(x) (((x) >> S_PF2_DSTATE) & M_PF2_DSTATE) + +#define S_PF1_DSTATE 3 +#define M_PF1_DSTATE 0x7U +#define V_PF1_DSTATE(x) ((x) << S_PF1_DSTATE) +#define G_PF1_DSTATE(x) (((x) >> S_PF1_DSTATE) & M_PF1_DSTATE) + +#define S_PF0_DSTATE 0 +#define M_PF0_DSTATE 0x7U +#define V_PF0_DSTATE(x) ((x) << S_PF0_DSTATE) +#define G_PF0_DSTATE(x) (((x) >> S_PF0_DSTATE) & M_PF0_DSTATE) + +#define A_PCIE_DEBUG_ADDR_RANGE1 0x5ee0 +#define A_PCIE_DEBUG_ADDR_RANGE2 0x5ef0 +#define A_PCIE_DEBUG_ADDR_RANGE_CNT 0x5f00 +#define A_PCIE_PDEBUG_REG_0X0 0x0 +#define A_PCIE_PDEBUG_REG_0X1 0x1 +#define A_PCIE_PDEBUG_REG_0X2 0x2 + +#define S_TAGQ_CH0_TAGS_USED 11 +#define M_TAGQ_CH0_TAGS_USED 0xffU +#define V_TAGQ_CH0_TAGS_USED(x) ((x) << S_TAGQ_CH0_TAGS_USED) +#define G_TAGQ_CH0_TAGS_USED(x) (((x) >> S_TAGQ_CH0_TAGS_USED) & M_TAGQ_CH0_TAGS_USED) + +#define S_REQ_CH0_DATA_EMPTY 10 +#define V_REQ_CH0_DATA_EMPTY(x) ((x) << S_REQ_CH0_DATA_EMPTY) +#define F_REQ_CH0_DATA_EMPTY V_REQ_CH0_DATA_EMPTY(1U) + +#define S_RDQ_CH0_REQ_EMPTY 9 +#define V_RDQ_CH0_REQ_EMPTY(x) ((x) << S_RDQ_CH0_REQ_EMPTY) +#define F_RDQ_CH0_REQ_EMPTY V_RDQ_CH0_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ 
V_REQ_CTL_RD_CH0_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_CMD V_REQ_CTL_RD_CH0_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH0_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_RDQ V_REQ_CTL_RD_CH0_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH0_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH0_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH0_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH0_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH0_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X3 0x3 + +#define S_TAGQ_CH1_TAGS_USED 11 +#define M_TAGQ_CH1_TAGS_USED 0xffU +#define V_TAGQ_CH1_TAGS_USED(x) ((x) << S_TAGQ_CH1_TAGS_USED) 
+#define G_TAGQ_CH1_TAGS_USED(x) (((x) >> S_TAGQ_CH1_TAGS_USED) & M_TAGQ_CH1_TAGS_USED) + +#define S_REQ_CH1_DATA_EMPTY 10 +#define V_REQ_CH1_DATA_EMPTY(x) ((x) << S_REQ_CH1_DATA_EMPTY) +#define F_REQ_CH1_DATA_EMPTY V_REQ_CH1_DATA_EMPTY(1U) + +#define S_RDQ_CH1_REQ_EMPTY 9 +#define V_RDQ_CH1_REQ_EMPTY(x) ((x) << S_RDQ_CH1_REQ_EMPTY) +#define F_RDQ_CH1_REQ_EMPTY V_RDQ_CH1_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH1_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_CMD V_REQ_CTL_RD_CH1_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH1_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_RDQ V_REQ_CTL_RD_CH1_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH1_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH1_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH1_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE 1 
+#define V_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH1_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH1_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X4 0x4 + +#define S_TAGQ_CH2_TAGS_USED 11 +#define M_TAGQ_CH2_TAGS_USED 0xffU +#define V_TAGQ_CH2_TAGS_USED(x) ((x) << S_TAGQ_CH2_TAGS_USED) +#define G_TAGQ_CH2_TAGS_USED(x) (((x) >> S_TAGQ_CH2_TAGS_USED) & M_TAGQ_CH2_TAGS_USED) + +#define S_REQ_CH2_DATA_EMPTY 10 +#define V_REQ_CH2_DATA_EMPTY(x) ((x) << S_REQ_CH2_DATA_EMPTY) +#define F_REQ_CH2_DATA_EMPTY V_REQ_CH2_DATA_EMPTY(1U) + +#define S_RDQ_CH2_REQ_EMPTY 9 +#define V_RDQ_CH2_REQ_EMPTY(x) ((x) << S_RDQ_CH2_REQ_EMPTY) +#define F_RDQ_CH2_REQ_EMPTY V_RDQ_CH2_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH2_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_CMD V_REQ_CTL_RD_CH2_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH2_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_RDQ V_REQ_CTL_RD_CH2_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO 
V_REQ_CTL_RD_CH2_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH2_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH2_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH2_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH2_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X5 0x5 + +#define S_TAGQ_CH3_TAGS_USED 11 +#define M_TAGQ_CH3_TAGS_USED 0xffU +#define V_TAGQ_CH3_TAGS_USED(x) ((x) << S_TAGQ_CH3_TAGS_USED) +#define G_TAGQ_CH3_TAGS_USED(x) (((x) >> S_TAGQ_CH3_TAGS_USED) & M_TAGQ_CH3_TAGS_USED) + +#define S_REQ_CH3_DATA_EMPTY 10 +#define V_REQ_CH3_DATA_EMPTY(x) ((x) << S_REQ_CH3_DATA_EMPTY) +#define F_REQ_CH3_DATA_EMPTY V_REQ_CH3_DATA_EMPTY(1U) + +#define S_RDQ_CH3_REQ_EMPTY 9 +#define V_RDQ_CH3_REQ_EMPTY(x) ((x) << S_RDQ_CH3_REQ_EMPTY) +#define F_RDQ_CH3_REQ_EMPTY V_RDQ_CH3_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH3_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_CMD V_REQ_CTL_RD_CH3_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM 6 +#define 
V_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH3_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_RDQ V_REQ_CTL_RD_CH3_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH3_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH3_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH3_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH3_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH3_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X6 0x6 + +#define S_TAGQ_CH4_TAGS_USED 11 +#define M_TAGQ_CH4_TAGS_USED 0xffU +#define V_TAGQ_CH4_TAGS_USED(x) ((x) << S_TAGQ_CH4_TAGS_USED) +#define G_TAGQ_CH4_TAGS_USED(x) (((x) >> S_TAGQ_CH4_TAGS_USED) & M_TAGQ_CH4_TAGS_USED) + +#define S_REQ_CH4_DATA_EMPTY 10 +#define V_REQ_CH4_DATA_EMPTY(x) ((x) << S_REQ_CH4_DATA_EMPTY) +#define F_REQ_CH4_DATA_EMPTY V_REQ_CH4_DATA_EMPTY(1U) + +#define S_RDQ_CH4_REQ_EMPTY 9 +#define 
V_RDQ_CH4_REQ_EMPTY(x) ((x) << S_RDQ_CH4_REQ_EMPTY) +#define F_RDQ_CH4_REQ_EMPTY V_RDQ_CH4_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH4_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_CMD V_REQ_CTL_RD_CH4_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH4_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_RDQ V_REQ_CTL_RD_CH4_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH4_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH4_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH4_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH4_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA(x) ((x) << 
S_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH4_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X7 0x7 + +#define S_TAGQ_CH5_TAGS_USED 11 +#define M_TAGQ_CH5_TAGS_USED 0xffU +#define V_TAGQ_CH5_TAGS_USED(x) ((x) << S_TAGQ_CH5_TAGS_USED) +#define G_TAGQ_CH5_TAGS_USED(x) (((x) >> S_TAGQ_CH5_TAGS_USED) & M_TAGQ_CH5_TAGS_USED) + +#define S_REQ_CH5_DATA_EMPTY 10 +#define V_REQ_CH5_DATA_EMPTY(x) ((x) << S_REQ_CH5_DATA_EMPTY) +#define F_REQ_CH5_DATA_EMPTY V_REQ_CH5_DATA_EMPTY(1U) + +#define S_RDQ_CH5_REQ_EMPTY 9 +#define V_RDQ_CH5_REQ_EMPTY(x) ((x) << S_RDQ_CH5_REQ_EMPTY) +#define F_RDQ_CH5_REQ_EMPTY V_RDQ_CH5_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH5_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_CMD V_REQ_CTL_RD_CH5_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH5_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_RDQ V_REQ_CTL_RD_CH5_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH5_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED 
V_REQ_CTL_RD_CH5_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH5_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH5_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH5_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X8 0x8 + +#define S_TAGQ_CH6_TAGS_USED 11 +#define M_TAGQ_CH6_TAGS_USED 0xffU +#define V_TAGQ_CH6_TAGS_USED(x) ((x) << S_TAGQ_CH6_TAGS_USED) +#define G_TAGQ_CH6_TAGS_USED(x) (((x) >> S_TAGQ_CH6_TAGS_USED) & M_TAGQ_CH6_TAGS_USED) + +#define S_REQ_CH6_DATA_EMPTY 10 +#define V_REQ_CH6_DATA_EMPTY(x) ((x) << S_REQ_CH6_DATA_EMPTY) +#define F_REQ_CH6_DATA_EMPTY V_REQ_CH6_DATA_EMPTY(1U) + +#define S_RDQ_CH6_REQ_EMPTY 9 +#define V_RDQ_CH6_REQ_EMPTY(x) ((x) << S_RDQ_CH6_REQ_EMPTY) +#define F_RDQ_CH6_REQ_EMPTY V_RDQ_CH6_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH6_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_CMD V_REQ_CTL_RD_CH6_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH6_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_RDQ(x) ((x) << 
S_REQ_CTL_RD_CH6_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_RDQ V_REQ_CTL_RD_CH6_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH6_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH6_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH6_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH6_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH6_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0X9 0x9 + +#define S_TAGQ_CH7_TAGS_USED 11 +#define M_TAGQ_CH7_TAGS_USED 0xffU +#define V_TAGQ_CH7_TAGS_USED(x) ((x) << S_TAGQ_CH7_TAGS_USED) +#define G_TAGQ_CH7_TAGS_USED(x) (((x) >> S_TAGQ_CH7_TAGS_USED) & M_TAGQ_CH7_TAGS_USED) + +#define S_REQ_CH7_DATA_EMPTY 10 +#define V_REQ_CH7_DATA_EMPTY(x) ((x) << S_REQ_CH7_DATA_EMPTY) +#define F_REQ_CH7_DATA_EMPTY V_REQ_CH7_DATA_EMPTY(1U) + +#define S_RDQ_CH7_REQ_EMPTY 9 +#define V_RDQ_CH7_REQ_EMPTY(x) ((x) << S_RDQ_CH7_REQ_EMPTY) +#define F_RDQ_CH7_REQ_EMPTY V_RDQ_CH7_REQ_EMPTY(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ 8 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ) +#define 
F_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ V_REQ_CTL_RD_CH7_WAIT_FOR_TAGTQ(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_CMD 7 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_CMD(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_CMD) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_CMD V_REQ_CTL_RD_CH7_WAIT_FOR_CMD(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM 6 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM V_REQ_CTL_RD_CH7_WAIT_FOR_DATA_MEM(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_RDQ 5 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_RDQ(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_RDQ) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_RDQ V_REQ_CTL_RD_CH7_WAIT_FOR_RDQ(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO 4 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO V_REQ_CTL_RD_CH7_WAIT_FOR_TXN_DISABLE_FIFO(1U) + +#define S_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED V_REQ_CTL_RD_CH7_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED V_REQ_CTL_RD_CH7_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE V_REQ_CTL_RD_CH7_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA V_REQ_CTL_RD_CH7_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XA 0xa + +#define S_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM 27 +#define V_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM(x) ((x) << 
S_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM) +#define F_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM V_REQ_CTL_RD_CH0_WAIT_FOR_SEQNUM(1U) + +#define S_REQ_CTL_WR_CH0_SEQNUM 19 +#define M_REQ_CTL_WR_CH0_SEQNUM 0xffU +#define V_REQ_CTL_WR_CH0_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH0_SEQNUM) +#define G_REQ_CTL_WR_CH0_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH0_SEQNUM) & M_REQ_CTL_WR_CH0_SEQNUM) + +#define S_REQ_CTL_RD_CH0_SEQNUM 11 +#define M_REQ_CTL_RD_CH0_SEQNUM 0xffU +#define V_REQ_CTL_RD_CH0_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH0_SEQNUM) +#define G_REQ_CTL_RD_CH0_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH0_SEQNUM) & M_REQ_CTL_RD_CH0_SEQNUM) + +#define S_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO 4 +#define V_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO) +#define F_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO V_REQ_CTL_WR_CH0_WAIT_FOR_SI_FIFO(1U) + +#define S_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED V_REQ_CTL_WR_CH0_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED V_REQ_CTL_WR_CH0_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE) +#define F_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE V_REQ_CTL_WR_CH0_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA V_REQ_CTL_WR_CH0_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XB 0xb + +#define S_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM 27 +#define V_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM) +#define F_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM V_REQ_CTL_RD_CH1_WAIT_FOR_SEQNUM(1U) + +#define 
S_REQ_CTL_WR_CH1_SEQNUM 19 +#define M_REQ_CTL_WR_CH1_SEQNUM 0xffU +#define V_REQ_CTL_WR_CH1_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH1_SEQNUM) +#define G_REQ_CTL_WR_CH1_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH1_SEQNUM) & M_REQ_CTL_WR_CH1_SEQNUM) + +#define S_REQ_CTL_RD_CH1_SEQNUM 11 +#define M_REQ_CTL_RD_CH1_SEQNUM 0xffU +#define V_REQ_CTL_RD_CH1_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH1_SEQNUM) +#define G_REQ_CTL_RD_CH1_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH1_SEQNUM) & M_REQ_CTL_RD_CH1_SEQNUM) + +#define S_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO 4 +#define V_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO) +#define F_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO V_REQ_CTL_WR_CH1_WAIT_FOR_SI_FIFO(1U) + +#define S_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED V_REQ_CTL_WR_CH1_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED V_REQ_CTL_WR_CH1_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE) +#define F_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE V_REQ_CTL_WR_CH1_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA V_REQ_CTL_WR_CH1_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XC 0xc + +#define S_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM 27 +#define V_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM) +#define F_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM V_REQ_CTL_RD_CH2_WAIT_FOR_SEQNUM(1U) + +#define S_REQ_CTL_WR_CH2_SEQNUM 19 +#define M_REQ_CTL_WR_CH2_SEQNUM 0xffU +#define V_REQ_CTL_WR_CH2_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH2_SEQNUM) 
+#define G_REQ_CTL_WR_CH2_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH2_SEQNUM) & M_REQ_CTL_WR_CH2_SEQNUM) + +#define S_REQ_CTL_RD_CH2_SEQNUM 11 +#define M_REQ_CTL_RD_CH2_SEQNUM 0xffU +#define V_REQ_CTL_RD_CH2_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH2_SEQNUM) +#define G_REQ_CTL_RD_CH2_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH2_SEQNUM) & M_REQ_CTL_RD_CH2_SEQNUM) + +#define S_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO 4 +#define V_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO) +#define F_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO V_REQ_CTL_WR_CH2_WAIT_FOR_SI_FIFO(1U) + +#define S_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED V_REQ_CTL_WR_CH2_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED V_REQ_CTL_WR_CH2_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE) +#define F_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE V_REQ_CTL_WR_CH2_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA V_REQ_CTL_WR_CH2_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XD 0xd + +#define S_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM 27 +#define V_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM) +#define F_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM V_REQ_CTL_RD_CH3_WAIT_FOR_SEQNUM(1U) + +#define S_REQ_CTL_WR_CH3_SEQNUM 19 +#define M_REQ_CTL_WR_CH3_SEQNUM 0xffU +#define V_REQ_CTL_WR_CH3_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH3_SEQNUM) +#define G_REQ_CTL_WR_CH3_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH3_SEQNUM) & M_REQ_CTL_WR_CH3_SEQNUM) + +#define S_REQ_CTL_RD_CH3_SEQNUM 11 
+#define M_REQ_CTL_RD_CH3_SEQNUM 0xffU +#define V_REQ_CTL_RD_CH3_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH3_SEQNUM) +#define G_REQ_CTL_RD_CH3_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH3_SEQNUM) & M_REQ_CTL_RD_CH3_SEQNUM) + +#define S_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO 4 +#define V_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO) +#define F_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO V_REQ_CTL_WR_CH3_WAIT_FOR_SI_FIFO(1U) + +#define S_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED V_REQ_CTL_WR_CH3_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED V_REQ_CTL_WR_CH3_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE) +#define F_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE V_REQ_CTL_WR_CH3_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA V_REQ_CTL_WR_CH3_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XE 0xe + +#define S_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM 27 +#define V_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM) +#define F_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM V_REQ_CTL_RD_CH4_WAIT_FOR_SEQNUM(1U) + +#define S_REQ_CTL_WR_CH4_SEQNUM 19 +#define M_REQ_CTL_WR_CH4_SEQNUM 0xffU +#define V_REQ_CTL_WR_CH4_SEQNUM(x) ((x) << S_REQ_CTL_WR_CH4_SEQNUM) +#define G_REQ_CTL_WR_CH4_SEQNUM(x) (((x) >> S_REQ_CTL_WR_CH4_SEQNUM) & M_REQ_CTL_WR_CH4_SEQNUM) + +#define S_REQ_CTL_RD_CH4_SEQNUM 11 +#define M_REQ_CTL_RD_CH4_SEQNUM 0xffU +#define V_REQ_CTL_RD_CH4_SEQNUM(x) ((x) << S_REQ_CTL_RD_CH4_SEQNUM) +#define 
G_REQ_CTL_RD_CH4_SEQNUM(x) (((x) >> S_REQ_CTL_RD_CH4_SEQNUM) & M_REQ_CTL_RD_CH4_SEQNUM) + +#define S_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO 4 +#define V_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO) +#define F_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO V_REQ_CTL_WR_CH4_WAIT_FOR_SI_FIFO(1U) + +#define S_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED 3 +#define V_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED) +#define F_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED V_REQ_CTL_WR_CH4_EXIT_BOT_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED 2 +#define V_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED(x) ((x) << S_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED) +#define F_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED V_REQ_CTL_WR_CH4_EXIT_TOP_VLD_STARTED(1U) + +#define S_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE 1 +#define V_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE) +#define F_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE V_REQ_CTL_WR_CH4_WAIT_FOR_PAUSE(1U) + +#define S_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA 0 +#define V_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA(x) ((x) << S_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA) +#define F_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA V_REQ_CTL_WR_CH4_WAIT_FOR_FIFO_DATA(1U) + +#define A_PCIE_PDEBUG_REG_0XF 0xf +#define A_PCIE_PDEBUG_REG_0X10 0x10 + +#define S_PIPE0_TX3_DATAK_0 31 +#define V_PIPE0_TX3_DATAK_0(x) ((x) << S_PIPE0_TX3_DATAK_0) +#define F_PIPE0_TX3_DATAK_0 V_PIPE0_TX3_DATAK_0(1U) + +#define S_PIPE0_TX3_DATA_6_0 24 +#define M_PIPE0_TX3_DATA_6_0 0x7fU +#define V_PIPE0_TX3_DATA_6_0(x) ((x) << S_PIPE0_TX3_DATA_6_0) +#define G_PIPE0_TX3_DATA_6_0(x) (((x) >> S_PIPE0_TX3_DATA_6_0) & M_PIPE0_TX3_DATA_6_0) + +#define S_PIPE0_TX2_DATA_7_0 16 +#define M_PIPE0_TX2_DATA_7_0 0xffU +#define V_PIPE0_TX2_DATA_7_0(x) ((x) << S_PIPE0_TX2_DATA_7_0) +#define G_PIPE0_TX2_DATA_7_0(x) (((x) >> S_PIPE0_TX2_DATA_7_0) & M_PIPE0_TX2_DATA_7_0) + +#define S_PIPE0_TX1_DATA_7_0 8 +#define M_PIPE0_TX1_DATA_7_0 0xffU +#define V_PIPE0_TX1_DATA_7_0(x) ((x) << 
S_PIPE0_TX1_DATA_7_0) +#define G_PIPE0_TX1_DATA_7_0(x) (((x) >> S_PIPE0_TX1_DATA_7_0) & M_PIPE0_TX1_DATA_7_0) + +#define S_PIPE0_TX0_DATAK_0 7 +#define V_PIPE0_TX0_DATAK_0(x) ((x) << S_PIPE0_TX0_DATAK_0) +#define F_PIPE0_TX0_DATAK_0 V_PIPE0_TX0_DATAK_0(1U) + +#define S_PIPE0_TX0_DATA_6_0 0 +#define M_PIPE0_TX0_DATA_6_0 0x7fU +#define V_PIPE0_TX0_DATA_6_0(x) ((x) << S_PIPE0_TX0_DATA_6_0) +#define G_PIPE0_TX0_DATA_6_0(x) (((x) >> S_PIPE0_TX0_DATA_6_0) & M_PIPE0_TX0_DATA_6_0) + +#define A_PCIE_PDEBUG_REG_0X11 0x11 + +#define S_PIPE0_TX3_DATAK_1 31 +#define V_PIPE0_TX3_DATAK_1(x) ((x) << S_PIPE0_TX3_DATAK_1) +#define F_PIPE0_TX3_DATAK_1 V_PIPE0_TX3_DATAK_1(1U) + +#define S_PIPE0_TX3_DATA_14_8 24 +#define M_PIPE0_TX3_DATA_14_8 0x7fU +#define V_PIPE0_TX3_DATA_14_8(x) ((x) << S_PIPE0_TX3_DATA_14_8) +#define G_PIPE0_TX3_DATA_14_8(x) (((x) >> S_PIPE0_TX3_DATA_14_8) & M_PIPE0_TX3_DATA_14_8) + +#define S_PIPE0_TX2_DATA_15_8 16 +#define M_PIPE0_TX2_DATA_15_8 0xffU +#define V_PIPE0_TX2_DATA_15_8(x) ((x) << S_PIPE0_TX2_DATA_15_8) +#define G_PIPE0_TX2_DATA_15_8(x) (((x) >> S_PIPE0_TX2_DATA_15_8) & M_PIPE0_TX2_DATA_15_8) + +#define S_PIPE0_TX1_DATA_15_8 8 +#define M_PIPE0_TX1_DATA_15_8 0xffU +#define V_PIPE0_TX1_DATA_15_8(x) ((x) << S_PIPE0_TX1_DATA_15_8) +#define G_PIPE0_TX1_DATA_15_8(x) (((x) >> S_PIPE0_TX1_DATA_15_8) & M_PIPE0_TX1_DATA_15_8) + +#define S_PIPE0_TX0_DATAK_1 7 +#define V_PIPE0_TX0_DATAK_1(x) ((x) << S_PIPE0_TX0_DATAK_1) +#define F_PIPE0_TX0_DATAK_1 V_PIPE0_TX0_DATAK_1(1U) + +#define S_PIPE0_TX0_DATA_14_8 0 +#define M_PIPE0_TX0_DATA_14_8 0x7fU +#define V_PIPE0_TX0_DATA_14_8(x) ((x) << S_PIPE0_TX0_DATA_14_8) +#define G_PIPE0_TX0_DATA_14_8(x) (((x) >> S_PIPE0_TX0_DATA_14_8) & M_PIPE0_TX0_DATA_14_8) + +#define A_PCIE_PDEBUG_REG_0X12 0x12 + +#define S_PIPE0_TX7_DATAK_0 31 +#define V_PIPE0_TX7_DATAK_0(x) ((x) << S_PIPE0_TX7_DATAK_0) +#define F_PIPE0_TX7_DATAK_0 V_PIPE0_TX7_DATAK_0(1U) + +#define S_PIPE0_TX7_DATA_6_0 24 +#define M_PIPE0_TX7_DATA_6_0 0x7fU +#define 
V_PIPE0_TX7_DATA_6_0(x) ((x) << S_PIPE0_TX7_DATA_6_0) +#define G_PIPE0_TX7_DATA_6_0(x) (((x) >> S_PIPE0_TX7_DATA_6_0) & M_PIPE0_TX7_DATA_6_0) + +#define S_PIPE0_TX6_DATA_7_0 16 +#define M_PIPE0_TX6_DATA_7_0 0xffU +#define V_PIPE0_TX6_DATA_7_0(x) ((x) << S_PIPE0_TX6_DATA_7_0) +#define G_PIPE0_TX6_DATA_7_0(x) (((x) >> S_PIPE0_TX6_DATA_7_0) & M_PIPE0_TX6_DATA_7_0) + +#define S_PIPE0_TX5_DATA_7_0 8 +#define M_PIPE0_TX5_DATA_7_0 0xffU +#define V_PIPE0_TX5_DATA_7_0(x) ((x) << S_PIPE0_TX5_DATA_7_0) +#define G_PIPE0_TX5_DATA_7_0(x) (((x) >> S_PIPE0_TX5_DATA_7_0) & M_PIPE0_TX5_DATA_7_0) + +#define S_PIPE0_TX4_DATAK_0 7 +#define V_PIPE0_TX4_DATAK_0(x) ((x) << S_PIPE0_TX4_DATAK_0) +#define F_PIPE0_TX4_DATAK_0 V_PIPE0_TX4_DATAK_0(1U) + +#define S_PIPE0_TX4_DATA_6_0 0 +#define M_PIPE0_TX4_DATA_6_0 0x7fU +#define V_PIPE0_TX4_DATA_6_0(x) ((x) << S_PIPE0_TX4_DATA_6_0) +#define G_PIPE0_TX4_DATA_6_0(x) (((x) >> S_PIPE0_TX4_DATA_6_0) & M_PIPE0_TX4_DATA_6_0) + +#define A_PCIE_PDEBUG_REG_0X13 0x13 + +#define S_PIPE0_TX7_DATAK_1 31 +#define V_PIPE0_TX7_DATAK_1(x) ((x) << S_PIPE0_TX7_DATAK_1) +#define F_PIPE0_TX7_DATAK_1 V_PIPE0_TX7_DATAK_1(1U) + +#define S_PIPE0_TX7_DATA_14_8 24 +#define M_PIPE0_TX7_DATA_14_8 0x7fU +#define V_PIPE0_TX7_DATA_14_8(x) ((x) << S_PIPE0_TX7_DATA_14_8) +#define G_PIPE0_TX7_DATA_14_8(x) (((x) >> S_PIPE0_TX7_DATA_14_8) & M_PIPE0_TX7_DATA_14_8) + +#define S_PIPE0_TX6_DATA_15_8 16 +#define M_PIPE0_TX6_DATA_15_8 0xffU +#define V_PIPE0_TX6_DATA_15_8(x) ((x) << S_PIPE0_TX6_DATA_15_8) +#define G_PIPE0_TX6_DATA_15_8(x) (((x) >> S_PIPE0_TX6_DATA_15_8) & M_PIPE0_TX6_DATA_15_8) + +#define S_PIPE0_TX5_DATA_15_8 8 +#define M_PIPE0_TX5_DATA_15_8 0xffU +#define V_PIPE0_TX5_DATA_15_8(x) ((x) << S_PIPE0_TX5_DATA_15_8) +#define G_PIPE0_TX5_DATA_15_8(x) (((x) >> S_PIPE0_TX5_DATA_15_8) & M_PIPE0_TX5_DATA_15_8) + +#define S_PIPE0_TX4_DATAK_1 7 +#define V_PIPE0_TX4_DATAK_1(x) ((x) << S_PIPE0_TX4_DATAK_1) +#define F_PIPE0_TX4_DATAK_1 V_PIPE0_TX4_DATAK_1(1U) + +#define 
S_PIPE0_TX4_DATA_14_8 0 +#define M_PIPE0_TX4_DATA_14_8 0x7fU +#define V_PIPE0_TX4_DATA_14_8(x) ((x) << S_PIPE0_TX4_DATA_14_8) +#define G_PIPE0_TX4_DATA_14_8(x) (((x) >> S_PIPE0_TX4_DATA_14_8) & M_PIPE0_TX4_DATA_14_8) + +#define A_PCIE_PDEBUG_REG_0X14 0x14 + +#define S_PIPE0_RX3_VALID_14 31 +#define V_PIPE0_RX3_VALID_14(x) ((x) << S_PIPE0_RX3_VALID_14) +#define F_PIPE0_RX3_VALID_14 V_PIPE0_RX3_VALID_14(1U) + +#define S_PIPE0_RX3_VALID2_14 24 +#define M_PIPE0_RX3_VALID2_14 0x7fU +#define V_PIPE0_RX3_VALID2_14(x) ((x) << S_PIPE0_RX3_VALID2_14) +#define G_PIPE0_RX3_VALID2_14(x) (((x) >> S_PIPE0_RX3_VALID2_14) & M_PIPE0_RX3_VALID2_14) + +#define S_PIPE0_RX2_VALID_14 16 +#define M_PIPE0_RX2_VALID_14 0xffU +#define V_PIPE0_RX2_VALID_14(x) ((x) << S_PIPE0_RX2_VALID_14) +#define G_PIPE0_RX2_VALID_14(x) (((x) >> S_PIPE0_RX2_VALID_14) & M_PIPE0_RX2_VALID_14) + +#define S_PIPE0_RX1_VALID_14 8 +#define M_PIPE0_RX1_VALID_14 0xffU +#define V_PIPE0_RX1_VALID_14(x) ((x) << S_PIPE0_RX1_VALID_14) +#define G_PIPE0_RX1_VALID_14(x) (((x) >> S_PIPE0_RX1_VALID_14) & M_PIPE0_RX1_VALID_14) + +#define S_PIPE0_RX0_VALID_14 7 +#define V_PIPE0_RX0_VALID_14(x) ((x) << S_PIPE0_RX0_VALID_14) +#define F_PIPE0_RX0_VALID_14 V_PIPE0_RX0_VALID_14(1U) + +#define S_PIPE0_RX0_VALID2_14 0 +#define M_PIPE0_RX0_VALID2_14 0x7fU +#define V_PIPE0_RX0_VALID2_14(x) ((x) << S_PIPE0_RX0_VALID2_14) +#define G_PIPE0_RX0_VALID2_14(x) (((x) >> S_PIPE0_RX0_VALID2_14) & M_PIPE0_RX0_VALID2_14) + +#define A_PCIE_PDEBUG_REG_0X15 0x15 + +#define S_PIPE0_RX3_VALID_15 31 +#define V_PIPE0_RX3_VALID_15(x) ((x) << S_PIPE0_RX3_VALID_15) +#define F_PIPE0_RX3_VALID_15 V_PIPE0_RX3_VALID_15(1U) + +#define S_PIPE0_RX3_VALID2_15 24 +#define M_PIPE0_RX3_VALID2_15 0x7fU +#define V_PIPE0_RX3_VALID2_15(x) ((x) << S_PIPE0_RX3_VALID2_15) +#define G_PIPE0_RX3_VALID2_15(x) (((x) >> S_PIPE0_RX3_VALID2_15) & M_PIPE0_RX3_VALID2_15) + +#define S_PIPE0_RX2_VALID_15 16 +#define M_PIPE0_RX2_VALID_15 0xffU +#define V_PIPE0_RX2_VALID_15(x) ((x) << 
S_PIPE0_RX2_VALID_15) +#define G_PIPE0_RX2_VALID_15(x) (((x) >> S_PIPE0_RX2_VALID_15) & M_PIPE0_RX2_VALID_15) + +#define S_PIPE0_RX1_VALID_15 8 +#define M_PIPE0_RX1_VALID_15 0xffU +#define V_PIPE0_RX1_VALID_15(x) ((x) << S_PIPE0_RX1_VALID_15) +#define G_PIPE0_RX1_VALID_15(x) (((x) >> S_PIPE0_RX1_VALID_15) & M_PIPE0_RX1_VALID_15) + +#define S_PIPE0_RX0_VALID_15 7 +#define V_PIPE0_RX0_VALID_15(x) ((x) << S_PIPE0_RX0_VALID_15) +#define F_PIPE0_RX0_VALID_15 V_PIPE0_RX0_VALID_15(1U) + +#define S_PIPE0_RX0_VALID2_15 0 +#define M_PIPE0_RX0_VALID2_15 0x7fU +#define V_PIPE0_RX0_VALID2_15(x) ((x) << S_PIPE0_RX0_VALID2_15) +#define G_PIPE0_RX0_VALID2_15(x) (((x) >> S_PIPE0_RX0_VALID2_15) & M_PIPE0_RX0_VALID2_15) + +#define A_PCIE_PDEBUG_REG_0X16 0x16 + +#define S_PIPE0_RX7_VALID_16 31 +#define V_PIPE0_RX7_VALID_16(x) ((x) << S_PIPE0_RX7_VALID_16) +#define F_PIPE0_RX7_VALID_16 V_PIPE0_RX7_VALID_16(1U) + +#define S_PIPE0_RX7_VALID2_16 24 +#define M_PIPE0_RX7_VALID2_16 0x7fU +#define V_PIPE0_RX7_VALID2_16(x) ((x) << S_PIPE0_RX7_VALID2_16) +#define G_PIPE0_RX7_VALID2_16(x) (((x) >> S_PIPE0_RX7_VALID2_16) & M_PIPE0_RX7_VALID2_16) + +#define S_PIPE0_RX6_VALID_16 16 +#define M_PIPE0_RX6_VALID_16 0xffU +#define V_PIPE0_RX6_VALID_16(x) ((x) << S_PIPE0_RX6_VALID_16) +#define G_PIPE0_RX6_VALID_16(x) (((x) >> S_PIPE0_RX6_VALID_16) & M_PIPE0_RX6_VALID_16) + +#define S_PIPE0_RX5_VALID_16 8 +#define M_PIPE0_RX5_VALID_16 0xffU +#define V_PIPE0_RX5_VALID_16(x) ((x) << S_PIPE0_RX5_VALID_16) +#define G_PIPE0_RX5_VALID_16(x) (((x) >> S_PIPE0_RX5_VALID_16) & M_PIPE0_RX5_VALID_16) + +#define S_PIPE0_RX4_VALID_16 7 +#define V_PIPE0_RX4_VALID_16(x) ((x) << S_PIPE0_RX4_VALID_16) +#define F_PIPE0_RX4_VALID_16 V_PIPE0_RX4_VALID_16(1U) + +#define S_PIPE0_RX4_VALID2_16 0 +#define M_PIPE0_RX4_VALID2_16 0x7fU +#define V_PIPE0_RX4_VALID2_16(x) ((x) << S_PIPE0_RX4_VALID2_16) +#define G_PIPE0_RX4_VALID2_16(x) (((x) >> S_PIPE0_RX4_VALID2_16) & M_PIPE0_RX4_VALID2_16) + +#define A_PCIE_PDEBUG_REG_0X17 0x17 + 
+#define S_PIPE0_RX7_VALID_17 31 +#define V_PIPE0_RX7_VALID_17(x) ((x) << S_PIPE0_RX7_VALID_17) +#define F_PIPE0_RX7_VALID_17 V_PIPE0_RX7_VALID_17(1U) + +#define S_PIPE0_RX7_VALID2_17 24 +#define M_PIPE0_RX7_VALID2_17 0x7fU +#define V_PIPE0_RX7_VALID2_17(x) ((x) << S_PIPE0_RX7_VALID2_17) +#define G_PIPE0_RX7_VALID2_17(x) (((x) >> S_PIPE0_RX7_VALID2_17) & M_PIPE0_RX7_VALID2_17) + +#define S_PIPE0_RX6_VALID_17 16 +#define M_PIPE0_RX6_VALID_17 0xffU +#define V_PIPE0_RX6_VALID_17(x) ((x) << S_PIPE0_RX6_VALID_17) +#define G_PIPE0_RX6_VALID_17(x) (((x) >> S_PIPE0_RX6_VALID_17) & M_PIPE0_RX6_VALID_17) + +#define S_PIPE0_RX5_VALID_17 8 +#define M_PIPE0_RX5_VALID_17 0xffU +#define V_PIPE0_RX5_VALID_17(x) ((x) << S_PIPE0_RX5_VALID_17) +#define G_PIPE0_RX5_VALID_17(x) (((x) >> S_PIPE0_RX5_VALID_17) & M_PIPE0_RX5_VALID_17) + +#define S_PIPE0_RX4_VALID_17 7 +#define V_PIPE0_RX4_VALID_17(x) ((x) << S_PIPE0_RX4_VALID_17) +#define F_PIPE0_RX4_VALID_17 V_PIPE0_RX4_VALID_17(1U) + +#define S_PIPE0_RX4_VALID2_17 0 +#define M_PIPE0_RX4_VALID2_17 0x7fU +#define V_PIPE0_RX4_VALID2_17(x) ((x) << S_PIPE0_RX4_VALID2_17) +#define G_PIPE0_RX4_VALID2_17(x) (((x) >> S_PIPE0_RX4_VALID2_17) & M_PIPE0_RX4_VALID2_17) + +#define A_PCIE_PDEBUG_REG_0X18 0x18 + +#define S_PIPE0_RX7_POLARITY 31 +#define V_PIPE0_RX7_POLARITY(x) ((x) << S_PIPE0_RX7_POLARITY) +#define F_PIPE0_RX7_POLARITY V_PIPE0_RX7_POLARITY(1U) + +#define S_PIPE0_RX7_STATUS 28 +#define M_PIPE0_RX7_STATUS 0x7U +#define V_PIPE0_RX7_STATUS(x) ((x) << S_PIPE0_RX7_STATUS) +#define G_PIPE0_RX7_STATUS(x) (((x) >> S_PIPE0_RX7_STATUS) & M_PIPE0_RX7_STATUS) + +#define S_PIPE0_RX6_POLARITY 27 +#define V_PIPE0_RX6_POLARITY(x) ((x) << S_PIPE0_RX6_POLARITY) +#define F_PIPE0_RX6_POLARITY V_PIPE0_RX6_POLARITY(1U) + +#define S_PIPE0_RX6_STATUS 24 +#define M_PIPE0_RX6_STATUS 0x7U +#define V_PIPE0_RX6_STATUS(x) ((x) << S_PIPE0_RX6_STATUS) +#define G_PIPE0_RX6_STATUS(x) (((x) >> S_PIPE0_RX6_STATUS) & M_PIPE0_RX6_STATUS) + +#define S_PIPE0_RX5_POLARITY 23 
+#define V_PIPE0_RX5_POLARITY(x) ((x) << S_PIPE0_RX5_POLARITY) +#define F_PIPE0_RX5_POLARITY V_PIPE0_RX5_POLARITY(1U) + +#define S_PIPE0_RX5_STATUS 20 +#define M_PIPE0_RX5_STATUS 0x7U +#define V_PIPE0_RX5_STATUS(x) ((x) << S_PIPE0_RX5_STATUS) +#define G_PIPE0_RX5_STATUS(x) (((x) >> S_PIPE0_RX5_STATUS) & M_PIPE0_RX5_STATUS) + +#define S_PIPE0_RX4_POLARITY 19 +#define V_PIPE0_RX4_POLARITY(x) ((x) << S_PIPE0_RX4_POLARITY) +#define F_PIPE0_RX4_POLARITY V_PIPE0_RX4_POLARITY(1U) + +#define S_PIPE0_RX4_STATUS 16 +#define M_PIPE0_RX4_STATUS 0x7U +#define V_PIPE0_RX4_STATUS(x) ((x) << S_PIPE0_RX4_STATUS) +#define G_PIPE0_RX4_STATUS(x) (((x) >> S_PIPE0_RX4_STATUS) & M_PIPE0_RX4_STATUS) + +#define S_PIPE0_RX3_POLARITY 15 +#define V_PIPE0_RX3_POLARITY(x) ((x) << S_PIPE0_RX3_POLARITY) +#define F_PIPE0_RX3_POLARITY V_PIPE0_RX3_POLARITY(1U) + +#define S_PIPE0_RX3_STATUS 12 +#define M_PIPE0_RX3_STATUS 0x7U +#define V_PIPE0_RX3_STATUS(x) ((x) << S_PIPE0_RX3_STATUS) +#define G_PIPE0_RX3_STATUS(x) (((x) >> S_PIPE0_RX3_STATUS) & M_PIPE0_RX3_STATUS) + +#define S_PIPE0_RX2_POLARITY 11 +#define V_PIPE0_RX2_POLARITY(x) ((x) << S_PIPE0_RX2_POLARITY) +#define F_PIPE0_RX2_POLARITY V_PIPE0_RX2_POLARITY(1U) + +#define S_PIPE0_RX2_STATUS 8 +#define M_PIPE0_RX2_STATUS 0x7U +#define V_PIPE0_RX2_STATUS(x) ((x) << S_PIPE0_RX2_STATUS) +#define G_PIPE0_RX2_STATUS(x) (((x) >> S_PIPE0_RX2_STATUS) & M_PIPE0_RX2_STATUS) + +#define S_PIPE0_RX1_POLARITY 7 +#define V_PIPE0_RX1_POLARITY(x) ((x) << S_PIPE0_RX1_POLARITY) +#define F_PIPE0_RX1_POLARITY V_PIPE0_RX1_POLARITY(1U) + +#define S_PIPE0_RX1_STATUS 4 +#define M_PIPE0_RX1_STATUS 0x7U +#define V_PIPE0_RX1_STATUS(x) ((x) << S_PIPE0_RX1_STATUS) +#define G_PIPE0_RX1_STATUS(x) (((x) >> S_PIPE0_RX1_STATUS) & M_PIPE0_RX1_STATUS) + +#define S_PIPE0_RX0_POLARITY 3 +#define V_PIPE0_RX0_POLARITY(x) ((x) << S_PIPE0_RX0_POLARITY) +#define F_PIPE0_RX0_POLARITY V_PIPE0_RX0_POLARITY(1U) + +#define S_PIPE0_RX0_STATUS 0 +#define M_PIPE0_RX0_STATUS 0x7U +#define 
V_PIPE0_RX0_STATUS(x) ((x) << S_PIPE0_RX0_STATUS) +#define G_PIPE0_RX0_STATUS(x) (((x) >> S_PIPE0_RX0_STATUS) & M_PIPE0_RX0_STATUS) + +#define A_PCIE_PDEBUG_REG_0X19 0x19 + +#define S_PIPE0_TX7_COMPLIANCE 31 +#define V_PIPE0_TX7_COMPLIANCE(x) ((x) << S_PIPE0_TX7_COMPLIANCE) +#define F_PIPE0_TX7_COMPLIANCE V_PIPE0_TX7_COMPLIANCE(1U) + +#define S_PIPE0_TX6_COMPLIANCE 30 +#define V_PIPE0_TX6_COMPLIANCE(x) ((x) << S_PIPE0_TX6_COMPLIANCE) +#define F_PIPE0_TX6_COMPLIANCE V_PIPE0_TX6_COMPLIANCE(1U) + +#define S_PIPE0_TX5_COMPLIANCE 29 +#define V_PIPE0_TX5_COMPLIANCE(x) ((x) << S_PIPE0_TX5_COMPLIANCE) +#define F_PIPE0_TX5_COMPLIANCE V_PIPE0_TX5_COMPLIANCE(1U) + +#define S_PIPE0_TX4_COMPLIANCE 28 +#define V_PIPE0_TX4_COMPLIANCE(x) ((x) << S_PIPE0_TX4_COMPLIANCE) +#define F_PIPE0_TX4_COMPLIANCE V_PIPE0_TX4_COMPLIANCE(1U) + +#define S_PIPE0_TX3_COMPLIANCE 27 +#define V_PIPE0_TX3_COMPLIANCE(x) ((x) << S_PIPE0_TX3_COMPLIANCE) +#define F_PIPE0_TX3_COMPLIANCE V_PIPE0_TX3_COMPLIANCE(1U) + +#define S_PIPE0_TX2_COMPLIANCE 26 +#define V_PIPE0_TX2_COMPLIANCE(x) ((x) << S_PIPE0_TX2_COMPLIANCE) +#define F_PIPE0_TX2_COMPLIANCE V_PIPE0_TX2_COMPLIANCE(1U) + +#define S_PIPE0_TX1_COMPLIANCE 25 +#define V_PIPE0_TX1_COMPLIANCE(x) ((x) << S_PIPE0_TX1_COMPLIANCE) +#define F_PIPE0_TX1_COMPLIANCE V_PIPE0_TX1_COMPLIANCE(1U) + +#define S_PIPE0_TX0_COMPLIANCE 24 +#define V_PIPE0_TX0_COMPLIANCE(x) ((x) << S_PIPE0_TX0_COMPLIANCE) +#define F_PIPE0_TX0_COMPLIANCE V_PIPE0_TX0_COMPLIANCE(1U) + +#define S_PIPE0_TX7_ELECIDLE 23 +#define V_PIPE0_TX7_ELECIDLE(x) ((x) << S_PIPE0_TX7_ELECIDLE) +#define F_PIPE0_TX7_ELECIDLE V_PIPE0_TX7_ELECIDLE(1U) + +#define S_PIPE0_TX6_ELECIDLE 22 +#define V_PIPE0_TX6_ELECIDLE(x) ((x) << S_PIPE0_TX6_ELECIDLE) +#define F_PIPE0_TX6_ELECIDLE V_PIPE0_TX6_ELECIDLE(1U) + +#define S_PIPE0_TX5_ELECIDLE 21 +#define V_PIPE0_TX5_ELECIDLE(x) ((x) << S_PIPE0_TX5_ELECIDLE) +#define F_PIPE0_TX5_ELECIDLE V_PIPE0_TX5_ELECIDLE(1U) + +#define S_PIPE0_TX4_ELECIDLE 20 +#define 
V_PIPE0_TX4_ELECIDLE(x) ((x) << S_PIPE0_TX4_ELECIDLE) +#define F_PIPE0_TX4_ELECIDLE V_PIPE0_TX4_ELECIDLE(1U) + +#define S_PIPE0_TX3_ELECIDLE 19 +#define V_PIPE0_TX3_ELECIDLE(x) ((x) << S_PIPE0_TX3_ELECIDLE) +#define F_PIPE0_TX3_ELECIDLE V_PIPE0_TX3_ELECIDLE(1U) + +#define S_PIPE0_TX2_ELECIDLE 18 +#define V_PIPE0_TX2_ELECIDLE(x) ((x) << S_PIPE0_TX2_ELECIDLE) +#define F_PIPE0_TX2_ELECIDLE V_PIPE0_TX2_ELECIDLE(1U) + +#define S_PIPE0_TX1_ELECIDLE 17 +#define V_PIPE0_TX1_ELECIDLE(x) ((x) << S_PIPE0_TX1_ELECIDLE) +#define F_PIPE0_TX1_ELECIDLE V_PIPE0_TX1_ELECIDLE(1U) + +#define S_PIPE0_TX0_ELECIDLE 16 +#define V_PIPE0_TX0_ELECIDLE(x) ((x) << S_PIPE0_TX0_ELECIDLE) +#define F_PIPE0_TX0_ELECIDLE V_PIPE0_TX0_ELECIDLE(1U) + +#define S_PIPE0_RX7_POLARITY_19 15 +#define V_PIPE0_RX7_POLARITY_19(x) ((x) << S_PIPE0_RX7_POLARITY_19) +#define F_PIPE0_RX7_POLARITY_19 V_PIPE0_RX7_POLARITY_19(1U) + +#define S_PIPE0_RX6_POLARITY_19 14 +#define V_PIPE0_RX6_POLARITY_19(x) ((x) << S_PIPE0_RX6_POLARITY_19) +#define F_PIPE0_RX6_POLARITY_19 V_PIPE0_RX6_POLARITY_19(1U) + +#define S_PIPE0_RX5_POLARITY_19 13 +#define V_PIPE0_RX5_POLARITY_19(x) ((x) << S_PIPE0_RX5_POLARITY_19) +#define F_PIPE0_RX5_POLARITY_19 V_PIPE0_RX5_POLARITY_19(1U) + +#define S_PIPE0_RX4_POLARITY_19 12 +#define V_PIPE0_RX4_POLARITY_19(x) ((x) << S_PIPE0_RX4_POLARITY_19) +#define F_PIPE0_RX4_POLARITY_19 V_PIPE0_RX4_POLARITY_19(1U) + +#define S_PIPE0_RX3_POLARITY_19 11 +#define V_PIPE0_RX3_POLARITY_19(x) ((x) << S_PIPE0_RX3_POLARITY_19) +#define F_PIPE0_RX3_POLARITY_19 V_PIPE0_RX3_POLARITY_19(1U) + +#define S_PIPE0_RX2_POLARITY_19 10 +#define V_PIPE0_RX2_POLARITY_19(x) ((x) << S_PIPE0_RX2_POLARITY_19) +#define F_PIPE0_RX2_POLARITY_19 V_PIPE0_RX2_POLARITY_19(1U) + +#define S_PIPE0_RX1_POLARITY_19 9 +#define V_PIPE0_RX1_POLARITY_19(x) ((x) << S_PIPE0_RX1_POLARITY_19) +#define F_PIPE0_RX1_POLARITY_19 V_PIPE0_RX1_POLARITY_19(1U) + +#define S_PIPE0_RX0_POLARITY_19 8 +#define V_PIPE0_RX0_POLARITY_19(x) ((x) << 
S_PIPE0_RX0_POLARITY_19) +#define F_PIPE0_RX0_POLARITY_19 V_PIPE0_RX0_POLARITY_19(1U) + +#define S_PIPE0_RX7_ELECIDLE 7 +#define V_PIPE0_RX7_ELECIDLE(x) ((x) << S_PIPE0_RX7_ELECIDLE) +#define F_PIPE0_RX7_ELECIDLE V_PIPE0_RX7_ELECIDLE(1U) + +#define S_PIPE0_RX6_ELECIDLE 6 +#define V_PIPE0_RX6_ELECIDLE(x) ((x) << S_PIPE0_RX6_ELECIDLE) +#define F_PIPE0_RX6_ELECIDLE V_PIPE0_RX6_ELECIDLE(1U) + +#define S_PIPE0_RX5_ELECIDLE 5 +#define V_PIPE0_RX5_ELECIDLE(x) ((x) << S_PIPE0_RX5_ELECIDLE) +#define F_PIPE0_RX5_ELECIDLE V_PIPE0_RX5_ELECIDLE(1U) + +#define S_PIPE0_RX4_ELECIDLE 4 +#define V_PIPE0_RX4_ELECIDLE(x) ((x) << S_PIPE0_RX4_ELECIDLE) +#define F_PIPE0_RX4_ELECIDLE V_PIPE0_RX4_ELECIDLE(1U) + +#define S_PIPE0_RX3_ELECIDLE 3 +#define V_PIPE0_RX3_ELECIDLE(x) ((x) << S_PIPE0_RX3_ELECIDLE) +#define F_PIPE0_RX3_ELECIDLE V_PIPE0_RX3_ELECIDLE(1U) + +#define S_PIPE0_RX2_ELECIDLE 2 +#define V_PIPE0_RX2_ELECIDLE(x) ((x) << S_PIPE0_RX2_ELECIDLE) +#define F_PIPE0_RX2_ELECIDLE V_PIPE0_RX2_ELECIDLE(1U) + +#define S_PIPE0_RX1_ELECIDLE 1 +#define V_PIPE0_RX1_ELECIDLE(x) ((x) << S_PIPE0_RX1_ELECIDLE) +#define F_PIPE0_RX1_ELECIDLE V_PIPE0_RX1_ELECIDLE(1U) + +#define S_PIPE0_RX0_ELECIDLE 0 +#define V_PIPE0_RX0_ELECIDLE(x) ((x) << S_PIPE0_RX0_ELECIDLE) +#define F_PIPE0_RX0_ELECIDLE V_PIPE0_RX0_ELECIDLE(1U) + +#define A_PCIE_PDEBUG_REG_0X1A 0x1a + +#define S_PIPE0_RESET_N 21 +#define V_PIPE0_RESET_N(x) ((x) << S_PIPE0_RESET_N) +#define F_PIPE0_RESET_N V_PIPE0_RESET_N(1U) + +#define S_PCS_COMMON_CLOCKS 20 +#define V_PCS_COMMON_CLOCKS(x) ((x) << S_PCS_COMMON_CLOCKS) +#define F_PCS_COMMON_CLOCKS V_PCS_COMMON_CLOCKS(1U) + +#define S_PCS_CLK_REQ 19 +#define V_PCS_CLK_REQ(x) ((x) << S_PCS_CLK_REQ) +#define F_PCS_CLK_REQ V_PCS_CLK_REQ(1U) + +#define S_PIPE_CLKREQ_N 18 +#define V_PIPE_CLKREQ_N(x) ((x) << S_PIPE_CLKREQ_N) +#define F_PIPE_CLKREQ_N V_PIPE_CLKREQ_N(1U) + +#define S_MAC_CLKREQ_N_TO_MUX 17 +#define V_MAC_CLKREQ_N_TO_MUX(x) ((x) << S_MAC_CLKREQ_N_TO_MUX) +#define F_MAC_CLKREQ_N_TO_MUX 
V_MAC_CLKREQ_N_TO_MUX(1U) + +#define S_PIPE0_TX2RX_LOOPBK 16 +#define V_PIPE0_TX2RX_LOOPBK(x) ((x) << S_PIPE0_TX2RX_LOOPBK) +#define F_PIPE0_TX2RX_LOOPBK V_PIPE0_TX2RX_LOOPBK(1U) + +#define S_PIPE0_TX_SWING 15 +#define V_PIPE0_TX_SWING(x) ((x) << S_PIPE0_TX_SWING) +#define F_PIPE0_TX_SWING V_PIPE0_TX_SWING(1U) + +#define S_PIPE0_TX_MARGIN 12 +#define M_PIPE0_TX_MARGIN 0x7U +#define V_PIPE0_TX_MARGIN(x) ((x) << S_PIPE0_TX_MARGIN) +#define G_PIPE0_TX_MARGIN(x) (((x) >> S_PIPE0_TX_MARGIN) & M_PIPE0_TX_MARGIN) + +#define S_PIPE0_TX_DEEMPH 11 +#define V_PIPE0_TX_DEEMPH(x) ((x) << S_PIPE0_TX_DEEMPH) +#define F_PIPE0_TX_DEEMPH V_PIPE0_TX_DEEMPH(1U) + +#define S_PIPE0_TX_DETECTRX 10 +#define V_PIPE0_TX_DETECTRX(x) ((x) << S_PIPE0_TX_DETECTRX) +#define F_PIPE0_TX_DETECTRX V_PIPE0_TX_DETECTRX(1U) + +#define S_PIPE0_POWERDOWN 8 +#define M_PIPE0_POWERDOWN 0x3U +#define V_PIPE0_POWERDOWN(x) ((x) << S_PIPE0_POWERDOWN) +#define G_PIPE0_POWERDOWN(x) (((x) >> S_PIPE0_POWERDOWN) & M_PIPE0_POWERDOWN) + +#define S_PHY_MAC_PHYSTATUS 0 +#define M_PHY_MAC_PHYSTATUS 0xffU +#define V_PHY_MAC_PHYSTATUS(x) ((x) << S_PHY_MAC_PHYSTATUS) +#define G_PHY_MAC_PHYSTATUS(x) (((x) >> S_PHY_MAC_PHYSTATUS) & M_PHY_MAC_PHYSTATUS) + +#define A_PCIE_PDEBUG_REG_0X1B 0x1b + +#define S_PIPE0_RX7_EQ_IN_PROG 31 +#define V_PIPE0_RX7_EQ_IN_PROG(x) ((x) << S_PIPE0_RX7_EQ_IN_PROG) +#define F_PIPE0_RX7_EQ_IN_PROG V_PIPE0_RX7_EQ_IN_PROG(1U) + +#define S_PIPE0_RX7_EQ_INVLD_REQ 30 +#define V_PIPE0_RX7_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX7_EQ_INVLD_REQ) +#define F_PIPE0_RX7_EQ_INVLD_REQ V_PIPE0_RX7_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX7_SYNCHEADER 28 +#define M_PIPE0_RX7_SYNCHEADER 0x3U +#define V_PIPE0_RX7_SYNCHEADER(x) ((x) << S_PIPE0_RX7_SYNCHEADER) +#define G_PIPE0_RX7_SYNCHEADER(x) (((x) >> S_PIPE0_RX7_SYNCHEADER) & M_PIPE0_RX7_SYNCHEADER) + +#define S_PIPE0_RX6_EQ_IN_PROG 27 +#define V_PIPE0_RX6_EQ_IN_PROG(x) ((x) << S_PIPE0_RX6_EQ_IN_PROG) +#define F_PIPE0_RX6_EQ_IN_PROG V_PIPE0_RX6_EQ_IN_PROG(1U) + +#define 
S_PIPE0_RX6_EQ_INVLD_REQ 26 +#define V_PIPE0_RX6_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX6_EQ_INVLD_REQ) +#define F_PIPE0_RX6_EQ_INVLD_REQ V_PIPE0_RX6_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX6_SYNCHEADER 24 +#define M_PIPE0_RX6_SYNCHEADER 0x3U +#define V_PIPE0_RX6_SYNCHEADER(x) ((x) << S_PIPE0_RX6_SYNCHEADER) +#define G_PIPE0_RX6_SYNCHEADER(x) (((x) >> S_PIPE0_RX6_SYNCHEADER) & M_PIPE0_RX6_SYNCHEADER) + +#define S_PIPE0_RX5_EQ_IN_PROG 23 +#define V_PIPE0_RX5_EQ_IN_PROG(x) ((x) << S_PIPE0_RX5_EQ_IN_PROG) +#define F_PIPE0_RX5_EQ_IN_PROG V_PIPE0_RX5_EQ_IN_PROG(1U) + +#define S_PIPE0_RX5_EQ_INVLD_REQ 22 +#define V_PIPE0_RX5_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX5_EQ_INVLD_REQ) +#define F_PIPE0_RX5_EQ_INVLD_REQ V_PIPE0_RX5_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX5_SYNCHEADER 20 +#define M_PIPE0_RX5_SYNCHEADER 0x3U +#define V_PIPE0_RX5_SYNCHEADER(x) ((x) << S_PIPE0_RX5_SYNCHEADER) +#define G_PIPE0_RX5_SYNCHEADER(x) (((x) >> S_PIPE0_RX5_SYNCHEADER) & M_PIPE0_RX5_SYNCHEADER) + +#define S_PIPE0_RX4_EQ_IN_PROG 19 +#define V_PIPE0_RX4_EQ_IN_PROG(x) ((x) << S_PIPE0_RX4_EQ_IN_PROG) +#define F_PIPE0_RX4_EQ_IN_PROG V_PIPE0_RX4_EQ_IN_PROG(1U) + +#define S_PIPE0_RX4_EQ_INVLD_REQ 18 +#define V_PIPE0_RX4_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX4_EQ_INVLD_REQ) +#define F_PIPE0_RX4_EQ_INVLD_REQ V_PIPE0_RX4_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX4_SYNCHEADER 16 +#define M_PIPE0_RX4_SYNCHEADER 0x3U +#define V_PIPE0_RX4_SYNCHEADER(x) ((x) << S_PIPE0_RX4_SYNCHEADER) +#define G_PIPE0_RX4_SYNCHEADER(x) (((x) >> S_PIPE0_RX4_SYNCHEADER) & M_PIPE0_RX4_SYNCHEADER) + +#define S_PIPE0_RX3_EQ_IN_PROG 15 +#define V_PIPE0_RX3_EQ_IN_PROG(x) ((x) << S_PIPE0_RX3_EQ_IN_PROG) +#define F_PIPE0_RX3_EQ_IN_PROG V_PIPE0_RX3_EQ_IN_PROG(1U) + +#define S_PIPE0_RX3_EQ_INVLD_REQ 14 +#define V_PIPE0_RX3_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX3_EQ_INVLD_REQ) +#define F_PIPE0_RX3_EQ_INVLD_REQ V_PIPE0_RX3_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX3_SYNCHEADER 12 +#define M_PIPE0_RX3_SYNCHEADER 0x3U +#define V_PIPE0_RX3_SYNCHEADER(x) ((x) << 
S_PIPE0_RX3_SYNCHEADER) +#define G_PIPE0_RX3_SYNCHEADER(x) (((x) >> S_PIPE0_RX3_SYNCHEADER) & M_PIPE0_RX3_SYNCHEADER) + +#define S_PIPE0_RX2_EQ_IN_PROG 11 +#define V_PIPE0_RX2_EQ_IN_PROG(x) ((x) << S_PIPE0_RX2_EQ_IN_PROG) +#define F_PIPE0_RX2_EQ_IN_PROG V_PIPE0_RX2_EQ_IN_PROG(1U) + +#define S_PIPE0_RX2_EQ_INVLD_REQ 10 +#define V_PIPE0_RX2_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX2_EQ_INVLD_REQ) +#define F_PIPE0_RX2_EQ_INVLD_REQ V_PIPE0_RX2_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX2_SYNCHEADER 8 +#define M_PIPE0_RX2_SYNCHEADER 0x3U +#define V_PIPE0_RX2_SYNCHEADER(x) ((x) << S_PIPE0_RX2_SYNCHEADER) +#define G_PIPE0_RX2_SYNCHEADER(x) (((x) >> S_PIPE0_RX2_SYNCHEADER) & M_PIPE0_RX2_SYNCHEADER) + +#define S_PIPE0_RX1_EQ_IN_PROG 7 +#define V_PIPE0_RX1_EQ_IN_PROG(x) ((x) << S_PIPE0_RX1_EQ_IN_PROG) +#define F_PIPE0_RX1_EQ_IN_PROG V_PIPE0_RX1_EQ_IN_PROG(1U) + +#define S_PIPE0_RX1_EQ_INVLD_REQ 6 +#define V_PIPE0_RX1_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX1_EQ_INVLD_REQ) +#define F_PIPE0_RX1_EQ_INVLD_REQ V_PIPE0_RX1_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX1_SYNCHEADER 4 +#define M_PIPE0_RX1_SYNCHEADER 0x3U +#define V_PIPE0_RX1_SYNCHEADER(x) ((x) << S_PIPE0_RX1_SYNCHEADER) +#define G_PIPE0_RX1_SYNCHEADER(x) (((x) >> S_PIPE0_RX1_SYNCHEADER) & M_PIPE0_RX1_SYNCHEADER) + +#define S_PIPE0_RX0_EQ_IN_PROG 3 +#define V_PIPE0_RX0_EQ_IN_PROG(x) ((x) << S_PIPE0_RX0_EQ_IN_PROG) +#define F_PIPE0_RX0_EQ_IN_PROG V_PIPE0_RX0_EQ_IN_PROG(1U) + +#define S_PIPE0_RX0_EQ_INVLD_REQ 2 +#define V_PIPE0_RX0_EQ_INVLD_REQ(x) ((x) << S_PIPE0_RX0_EQ_INVLD_REQ) +#define F_PIPE0_RX0_EQ_INVLD_REQ V_PIPE0_RX0_EQ_INVLD_REQ(1U) + +#define S_PIPE0_RX0_SYNCHEADER 0 +#define M_PIPE0_RX0_SYNCHEADER 0x3U +#define V_PIPE0_RX0_SYNCHEADER(x) ((x) << S_PIPE0_RX0_SYNCHEADER) +#define G_PIPE0_RX0_SYNCHEADER(x) (((x) >> S_PIPE0_RX0_SYNCHEADER) & M_PIPE0_RX0_SYNCHEADER) + +#define A_PCIE_PDEBUG_REG_0X1C 0x1c + +#define S_SI_REQVFID 24 +#define M_SI_REQVFID 0xffU +#define V_SI_REQVFID(x) ((x) << S_SI_REQVFID) +#define G_SI_REQVFID(x) (((x) >> 
S_SI_REQVFID) & M_SI_REQVFID) + +#define S_SI_REQVEC 13 +#define M_SI_REQVEC 0x7ffU +#define V_SI_REQVEC(x) ((x) << S_SI_REQVEC) +#define G_SI_REQVEC(x) (((x) >> S_SI_REQVEC) & M_SI_REQVEC) + +#define S_SI_REQTCVAL 10 +#define M_SI_REQTCVAL 0x7U +#define V_SI_REQTCVAL(x) ((x) << S_SI_REQTCVAL) +#define G_SI_REQTCVAL(x) (((x) >> S_SI_REQTCVAL) & M_SI_REQTCVAL) + +#define S_SI_REQRDY 9 +#define V_SI_REQRDY(x) ((x) << S_SI_REQRDY) +#define F_SI_REQRDY V_SI_REQRDY(1U) + +#define S_SI_REQVLD 8 +#define V_SI_REQVLD(x) ((x) << S_SI_REQVLD) +#define F_SI_REQVLD V_SI_REQVLD(1U) + +#define S_T5_AI 0 +#define M_T5_AI 0xffU +#define V_T5_AI(x) ((x) << S_T5_AI) +#define G_T5_AI(x) (((x) >> S_T5_AI) & M_T5_AI) + +#define A_PCIE_PDEBUG_REG_0X1D 0x1d + +#define S_GNTSI 31 +#define V_GNTSI(x) ((x) << S_GNTSI) +#define F_GNTSI V_GNTSI(1U) + +#define S_DROPINTFORFLR 30 +#define V_DROPINTFORFLR(x) ((x) << S_DROPINTFORFLR) +#define F_DROPINTFORFLR V_DROPINTFORFLR(1U) + +#define S_SMARB 27 +#define M_SMARB 0x7U +#define V_SMARB(x) ((x) << S_SMARB) +#define G_SMARB(x) (((x) >> S_SMARB) & M_SMARB) + +#define S_SMDEFR 24 +#define M_SMDEFR 0x7U +#define V_SMDEFR(x) ((x) << S_SMDEFR) +#define G_SMDEFR(x) (((x) >> S_SMDEFR) & M_SMDEFR) + +#define S_SYS_INT 16 +#define M_SYS_INT 0xffU +#define V_SYS_INT(x) ((x) << S_SYS_INT) +#define G_SYS_INT(x) (((x) >> S_SYS_INT) & M_SYS_INT) + +#define S_CFG_INTXCLR 8 +#define M_CFG_INTXCLR 0xffU +#define V_CFG_INTXCLR(x) ((x) << S_CFG_INTXCLR) +#define G_CFG_INTXCLR(x) (((x) >> S_CFG_INTXCLR) & M_CFG_INTXCLR) + +#define S_PIO_INTXCLR 0 +#define M_PIO_INTXCLR 0xffU +#define V_PIO_INTXCLR(x) ((x) << S_PIO_INTXCLR) +#define G_PIO_INTXCLR(x) (((x) >> S_PIO_INTXCLR) & M_PIO_INTXCLR) + +#define A_PCIE_PDEBUG_REG_0X1E 0x1e + +#define S_PLI_TABDATWREN 31 +#define V_PLI_TABDATWREN(x) ((x) << S_PLI_TABDATWREN) +#define F_PLI_TABDATWREN V_PLI_TABDATWREN(1U) + +#define S_TAB_RDENA 30 +#define V_TAB_RDENA(x) ((x) << S_TAB_RDENA) +#define F_TAB_RDENA V_TAB_RDENA(1U) + 
+#define S_TAB_RDENA2 19 +#define M_TAB_RDENA2 0x7ffU +#define V_TAB_RDENA2(x) ((x) << S_TAB_RDENA2) +#define G_TAB_RDENA2(x) (((x) >> S_TAB_RDENA2) & M_TAB_RDENA2) + +#define S_PLI_REQADDR 10 +#define M_PLI_REQADDR 0x1ffU +#define V_PLI_REQADDR(x) ((x) << S_PLI_REQADDR) +#define G_PLI_REQADDR(x) (((x) >> S_PLI_REQADDR) & M_PLI_REQADDR) + +#define S_PLI_REQVFID 2 +#define M_PLI_REQVFID 0xffU +#define V_PLI_REQVFID(x) ((x) << S_PLI_REQVFID) +#define G_PLI_REQVFID(x) (((x) >> S_PLI_REQVFID) & M_PLI_REQVFID) + +#define S_PLI_REQTABHIT 1 +#define V_PLI_REQTABHIT(x) ((x) << S_PLI_REQTABHIT) +#define F_PLI_REQTABHIT V_PLI_REQTABHIT(1U) + +#define S_PLI_REQRDVLD 0 +#define V_PLI_REQRDVLD(x) ((x) << S_PLI_REQRDVLD) +#define F_PLI_REQRDVLD V_PLI_REQRDVLD(1U) + +#define A_PCIE_PDEBUG_REG_0X1F 0x1f +#define A_PCIE_PDEBUG_REG_0X20 0x20 +#define A_PCIE_PDEBUG_REG_0X21 0x21 + +#define S_PLI_REQPBASTART 20 +#define M_PLI_REQPBASTART 0xfffU +#define V_PLI_REQPBASTART(x) ((x) << S_PLI_REQPBASTART) +#define G_PLI_REQPBASTART(x) (((x) >> S_PLI_REQPBASTART) & M_PLI_REQPBASTART) + +#define S_PLI_REQPBAEND 9 +#define M_PLI_REQPBAEND 0x7ffU +#define V_PLI_REQPBAEND(x) ((x) << S_PLI_REQPBAEND) +#define G_PLI_REQPBAEND(x) (((x) >> S_PLI_REQPBAEND) & M_PLI_REQPBAEND) + +#define S_T5_PLI_REQVFID 2 +#define M_T5_PLI_REQVFID 0x7fU +#define V_T5_PLI_REQVFID(x) ((x) << S_T5_PLI_REQVFID) +#define G_T5_PLI_REQVFID(x) (((x) >> S_T5_PLI_REQVFID) & M_T5_PLI_REQVFID) + +#define S_PLI_REQPBAHIT 1 +#define V_PLI_REQPBAHIT(x) ((x) << S_PLI_REQPBAHIT) +#define F_PLI_REQPBAHIT V_PLI_REQPBAHIT(1U) + +#define A_PCIE_PDEBUG_REG_0X22 0x22 + +#define S_GNTSI1 31 +#define V_GNTSI1(x) ((x) << S_GNTSI1) +#define F_GNTSI1 V_GNTSI1(1U) + +#define S_GNTSI2 30 +#define V_GNTSI2(x) ((x) << S_GNTSI2) +#define F_GNTSI2 V_GNTSI2(1U) + +#define S_GNTSI3 27 +#define M_GNTSI3 0x7U +#define V_GNTSI3(x) ((x) << S_GNTSI3) +#define G_GNTSI3(x) (((x) >> S_GNTSI3) & M_GNTSI3) + +#define S_GNTSI4 16 +#define M_GNTSI4 0x7ffU 
+#define V_GNTSI4(x) ((x) << S_GNTSI4) +#define G_GNTSI4(x) (((x) >> S_GNTSI4) & M_GNTSI4) + +#define S_GNTSI5 8 +#define M_GNTSI5 0xffU +#define V_GNTSI5(x) ((x) << S_GNTSI5) +#define G_GNTSI5(x) (((x) >> S_GNTSI5) & M_GNTSI5) + +#define S_GNTSI6 7 +#define V_GNTSI6(x) ((x) << S_GNTSI6) +#define F_GNTSI6 V_GNTSI6(1U) + +#define S_GNTSI7 6 +#define V_GNTSI7(x) ((x) << S_GNTSI7) +#define F_GNTSI7 V_GNTSI7(1U) + +#define S_GNTSI8 5 +#define V_GNTSI8(x) ((x) << S_GNTSI8) +#define F_GNTSI8 V_GNTSI8(1U) + +#define S_GNTSI9 4 +#define V_GNTSI9(x) ((x) << S_GNTSI9) +#define F_GNTSI9 V_GNTSI9(1U) + +#define S_GNTSIA 3 +#define V_GNTSIA(x) ((x) << S_GNTSIA) +#define F_GNTSIA V_GNTSIA(1U) + +#define S_GNTAI 2 +#define V_GNTAI(x) ((x) << S_GNTAI) +#define F_GNTAI V_GNTAI(1U) + +#define S_GNTDB 1 +#define V_GNTDB(x) ((x) << S_GNTDB) +#define F_GNTDB V_GNTDB(1U) + +#define S_GNTDI 0 +#define V_GNTDI(x) ((x) << S_GNTDI) +#define F_GNTDI V_GNTDI(1U) + +#define A_PCIE_PDEBUG_REG_0X23 0x23 + +#define S_DI_REQVLD 31 +#define V_DI_REQVLD(x) ((x) << S_DI_REQVLD) +#define F_DI_REQVLD V_DI_REQVLD(1U) + +#define S_DI_REQRDY 30 +#define V_DI_REQRDY(x) ((x) << S_DI_REQRDY) +#define F_DI_REQRDY V_DI_REQRDY(1U) + +#define S_DI_REQWREN 19 +#define M_DI_REQWREN 0x7ffU +#define V_DI_REQWREN(x) ((x) << S_DI_REQWREN) +#define G_DI_REQWREN(x) (((x) >> S_DI_REQWREN) & M_DI_REQWREN) + +#define S_DI_REQMSIEN 18 +#define V_DI_REQMSIEN(x) ((x) << S_DI_REQMSIEN) +#define F_DI_REQMSIEN V_DI_REQMSIEN(1U) + +#define S_DI_REQMSXEN 17 +#define V_DI_REQMSXEN(x) ((x) << S_DI_REQMSXEN) +#define F_DI_REQMSXEN V_DI_REQMSXEN(1U) + +#define S_DI_REQMSXVFIDMSK 16 +#define V_DI_REQMSXVFIDMSK(x) ((x) << S_DI_REQMSXVFIDMSK) +#define F_DI_REQMSXVFIDMSK V_DI_REQMSXVFIDMSK(1U) + +#define S_DI_REQWREN2 2 +#define M_DI_REQWREN2 0x3fffU +#define V_DI_REQWREN2(x) ((x) << S_DI_REQWREN2) +#define G_DI_REQWREN2(x) (((x) >> S_DI_REQWREN2) & M_DI_REQWREN2) + +#define S_DI_REQRDEN 1 +#define V_DI_REQRDEN(x) ((x) << S_DI_REQRDEN) 
+#define F_DI_REQRDEN V_DI_REQRDEN(1U) + +#define S_DI_REQWREN3 0 +#define V_DI_REQWREN3(x) ((x) << S_DI_REQWREN3) +#define F_DI_REQWREN3 V_DI_REQWREN3(1U) + +#define A_PCIE_PDEBUG_REG_0X24 0x24 +#define A_PCIE_PDEBUG_REG_0X25 0x25 +#define A_PCIE_PDEBUG_REG_0X26 0x26 +#define A_PCIE_PDEBUG_REG_0X27 0x27 + +#define S_FID_STI_RSPVLD 31 +#define V_FID_STI_RSPVLD(x) ((x) << S_FID_STI_RSPVLD) +#define F_FID_STI_RSPVLD V_FID_STI_RSPVLD(1U) + +#define S_TAB_STIRDENA 30 +#define V_TAB_STIRDENA(x) ((x) << S_TAB_STIRDENA) +#define F_TAB_STIRDENA V_TAB_STIRDENA(1U) + +#define S_TAB_STIWRENA 29 +#define V_TAB_STIWRENA(x) ((x) << S_TAB_STIWRENA) +#define F_TAB_STIWRENA V_TAB_STIWRENA(1U) + +#define S_TAB_STIRDENA2 18 +#define M_TAB_STIRDENA2 0x7ffU +#define V_TAB_STIRDENA2(x) ((x) << S_TAB_STIRDENA2) +#define G_TAB_STIRDENA2(x) (((x) >> S_TAB_STIRDENA2) & M_TAB_STIRDENA2) + +#define S_T5_PLI_REQTABHIT 7 +#define M_T5_PLI_REQTABHIT 0x7ffU +#define V_T5_PLI_REQTABHIT(x) ((x) << S_T5_PLI_REQTABHIT) +#define G_T5_PLI_REQTABHIT(x) (((x) >> S_T5_PLI_REQTABHIT) & M_T5_PLI_REQTABHIT) + +#define S_T5_GNTSI 0 +#define M_T5_GNTSI 0x7fU +#define V_T5_GNTSI(x) ((x) << S_T5_GNTSI) +#define G_T5_GNTSI(x) (((x) >> S_T5_GNTSI) & M_T5_GNTSI) + +#define A_PCIE_PDEBUG_REG_0X28 0x28 + +#define S_PLI_REQWRVLD 31 +#define V_PLI_REQWRVLD(x) ((x) << S_PLI_REQWRVLD) +#define F_PLI_REQWRVLD V_PLI_REQWRVLD(1U) + +#define S_T5_PLI_REQPBAHIT 30 +#define V_T5_PLI_REQPBAHIT(x) ((x) << S_T5_PLI_REQPBAHIT) +#define F_T5_PLI_REQPBAHIT V_T5_PLI_REQPBAHIT(1U) + +#define S_PLI_TABADDRLWREN 29 +#define V_PLI_TABADDRLWREN(x) ((x) << S_PLI_TABADDRLWREN) +#define F_PLI_TABADDRLWREN V_PLI_TABADDRLWREN(1U) + +#define S_PLI_TABADDRHWREN 28 +#define V_PLI_TABADDRHWREN(x) ((x) << S_PLI_TABADDRHWREN) +#define F_PLI_TABADDRHWREN V_PLI_TABADDRHWREN(1U) + +#define S_T5_PLI_TABDATWREN 27 +#define V_T5_PLI_TABDATWREN(x) ((x) << S_T5_PLI_TABDATWREN) +#define F_T5_PLI_TABDATWREN V_T5_PLI_TABDATWREN(1U) + +#define S_PLI_TABMSKWREN 
26 +#define V_PLI_TABMSKWREN(x) ((x) << S_PLI_TABMSKWREN) +#define F_PLI_TABMSKWREN V_PLI_TABMSKWREN(1U) + +#define S_AI_REQVLD 23 +#define M_AI_REQVLD 0x7U +#define V_AI_REQVLD(x) ((x) << S_AI_REQVLD) +#define G_AI_REQVLD(x) (((x) >> S_AI_REQVLD) & M_AI_REQVLD) + +#define S_AI_REQVLD2 22 +#define V_AI_REQVLD2(x) ((x) << S_AI_REQVLD2) +#define F_AI_REQVLD2 V_AI_REQVLD2(1U) + +#define S_AI_REQRDY 21 +#define V_AI_REQRDY(x) ((x) << S_AI_REQRDY) +#define F_AI_REQRDY V_AI_REQRDY(1U) + +#define S_VEN_MSI_REQ_28 18 +#define M_VEN_MSI_REQ_28 0x7U +#define V_VEN_MSI_REQ_28(x) ((x) << S_VEN_MSI_REQ_28) +#define G_VEN_MSI_REQ_28(x) (((x) >> S_VEN_MSI_REQ_28) & M_VEN_MSI_REQ_28) + +#define S_VEN_MSI_REQ2 11 +#define M_VEN_MSI_REQ2 0x7fU +#define V_VEN_MSI_REQ2(x) ((x) << S_VEN_MSI_REQ2) +#define G_VEN_MSI_REQ2(x) (((x) >> S_VEN_MSI_REQ2) & M_VEN_MSI_REQ2) + +#define S_VEN_MSI_REQ3 6 +#define M_VEN_MSI_REQ3 0x1fU +#define V_VEN_MSI_REQ3(x) ((x) << S_VEN_MSI_REQ3) +#define G_VEN_MSI_REQ3(x) (((x) >> S_VEN_MSI_REQ3) & M_VEN_MSI_REQ3) + +#define S_VEN_MSI_REQ4 3 +#define M_VEN_MSI_REQ4 0x7U +#define V_VEN_MSI_REQ4(x) ((x) << S_VEN_MSI_REQ4) +#define G_VEN_MSI_REQ4(x) (((x) >> S_VEN_MSI_REQ4) & M_VEN_MSI_REQ4) + +#define S_VEN_MSI_REQ5 2 +#define V_VEN_MSI_REQ5(x) ((x) << S_VEN_MSI_REQ5) +#define F_VEN_MSI_REQ5 V_VEN_MSI_REQ5(1U) + +#define S_VEN_MSI_GRANT 1 +#define V_VEN_MSI_GRANT(x) ((x) << S_VEN_MSI_GRANT) +#define F_VEN_MSI_GRANT V_VEN_MSI_GRANT(1U) + +#define S_VEN_MSI_REQ6 0 +#define V_VEN_MSI_REQ6(x) ((x) << S_VEN_MSI_REQ6) +#define F_VEN_MSI_REQ6 V_VEN_MSI_REQ6(1U) + +#define A_PCIE_PDEBUG_REG_0X29 0x29 + +#define S_TRGT1_REQDATAVLD 16 +#define M_TRGT1_REQDATAVLD 0xffffU +#define V_TRGT1_REQDATAVLD(x) ((x) << S_TRGT1_REQDATAVLD) +#define G_TRGT1_REQDATAVLD(x) (((x) >> S_TRGT1_REQDATAVLD) & M_TRGT1_REQDATAVLD) + +#define S_TRGT1_REQDATAVLD2 12 +#define M_TRGT1_REQDATAVLD2 0xfU +#define V_TRGT1_REQDATAVLD2(x) ((x) << S_TRGT1_REQDATAVLD2) +#define G_TRGT1_REQDATAVLD2(x) 
(((x) >> S_TRGT1_REQDATAVLD2) & M_TRGT1_REQDATAVLD2) + +#define S_TRGT1_REQDATAVLD3 11 +#define V_TRGT1_REQDATAVLD3(x) ((x) << S_TRGT1_REQDATAVLD3) +#define F_TRGT1_REQDATAVLD3 V_TRGT1_REQDATAVLD3(1U) + +#define S_TRGT1_REQDATAVLD4 10 +#define V_TRGT1_REQDATAVLD4(x) ((x) << S_TRGT1_REQDATAVLD4) +#define F_TRGT1_REQDATAVLD4 V_TRGT1_REQDATAVLD4(1U) + +#define S_TRGT1_REQDATAVLD5 9 +#define V_TRGT1_REQDATAVLD5(x) ((x) << S_TRGT1_REQDATAVLD5) +#define F_TRGT1_REQDATAVLD5 V_TRGT1_REQDATAVLD5(1U) + +#define S_TRGT1_REQDATAVLD6 8 +#define V_TRGT1_REQDATAVLD6(x) ((x) << S_TRGT1_REQDATAVLD6) +#define F_TRGT1_REQDATAVLD6 V_TRGT1_REQDATAVLD6(1U) + +#define S_TRGT1_REQDATAVLD7 4 +#define M_TRGT1_REQDATAVLD7 0xfU +#define V_TRGT1_REQDATAVLD7(x) ((x) << S_TRGT1_REQDATAVLD7) +#define G_TRGT1_REQDATAVLD7(x) (((x) >> S_TRGT1_REQDATAVLD7) & M_TRGT1_REQDATAVLD7) + +#define S_TRGT1_REQDATAVLD8 2 +#define M_TRGT1_REQDATAVLD8 0x3U +#define V_TRGT1_REQDATAVLD8(x) ((x) << S_TRGT1_REQDATAVLD8) +#define G_TRGT1_REQDATAVLD8(x) (((x) >> S_TRGT1_REQDATAVLD8) & M_TRGT1_REQDATAVLD8) + +#define S_TRGT1_REQDATARDY 1 +#define V_TRGT1_REQDATARDY(x) ((x) << S_TRGT1_REQDATARDY) +#define F_TRGT1_REQDATARDY V_TRGT1_REQDATARDY(1U) + +#define S_TRGT1_REQDATAVLD0 0 +#define V_TRGT1_REQDATAVLD0(x) ((x) << S_TRGT1_REQDATAVLD0) +#define F_TRGT1_REQDATAVLD0 V_TRGT1_REQDATAVLD0(1U) + +#define A_PCIE_PDEBUG_REG_0X2A 0x2a +#define A_PCIE_PDEBUG_REG_0X2B 0x2b + +#define S_RADM_TRGT1_ADDR 20 +#define M_RADM_TRGT1_ADDR 0xfffU +#define V_RADM_TRGT1_ADDR(x) ((x) << S_RADM_TRGT1_ADDR) +#define G_RADM_TRGT1_ADDR(x) (((x) >> S_RADM_TRGT1_ADDR) & M_RADM_TRGT1_ADDR) + +#define S_RADM_TRGT1_DWEN 16 +#define M_RADM_TRGT1_DWEN 0xfU +#define V_RADM_TRGT1_DWEN(x) ((x) << S_RADM_TRGT1_DWEN) +#define G_RADM_TRGT1_DWEN(x) (((x) >> S_RADM_TRGT1_DWEN) & M_RADM_TRGT1_DWEN) + +#define S_RADM_TRGT1_FMT 14 +#define M_RADM_TRGT1_FMT 0x3U +#define V_RADM_TRGT1_FMT(x) ((x) << S_RADM_TRGT1_FMT) +#define G_RADM_TRGT1_FMT(x) (((x) >> 
S_RADM_TRGT1_FMT) & M_RADM_TRGT1_FMT) + +#define S_RADM_TRGT1_TYPE 9 +#define M_RADM_TRGT1_TYPE 0x1fU +#define V_RADM_TRGT1_TYPE(x) ((x) << S_RADM_TRGT1_TYPE) +#define G_RADM_TRGT1_TYPE(x) (((x) >> S_RADM_TRGT1_TYPE) & M_RADM_TRGT1_TYPE) + +#define S_RADM_TRGT1_IN_MEMBAR_RANGE 6 +#define M_RADM_TRGT1_IN_MEMBAR_RANGE 0x7U +#define V_RADM_TRGT1_IN_MEMBAR_RANGE(x) ((x) << S_RADM_TRGT1_IN_MEMBAR_RANGE) +#define G_RADM_TRGT1_IN_MEMBAR_RANGE(x) (((x) >> S_RADM_TRGT1_IN_MEMBAR_RANGE) & M_RADM_TRGT1_IN_MEMBAR_RANGE) + +#define S_RADM_TRGT1_ECRC_ERR 5 +#define V_RADM_TRGT1_ECRC_ERR(x) ((x) << S_RADM_TRGT1_ECRC_ERR) +#define F_RADM_TRGT1_ECRC_ERR V_RADM_TRGT1_ECRC_ERR(1U) + +#define S_RADM_TRGT1_DLLP_ABORT 4 +#define V_RADM_TRGT1_DLLP_ABORT(x) ((x) << S_RADM_TRGT1_DLLP_ABORT) +#define F_RADM_TRGT1_DLLP_ABORT V_RADM_TRGT1_DLLP_ABORT(1U) + +#define S_RADM_TRGT1_TLP_ABORT 3 +#define V_RADM_TRGT1_TLP_ABORT(x) ((x) << S_RADM_TRGT1_TLP_ABORT) +#define F_RADM_TRGT1_TLP_ABORT V_RADM_TRGT1_TLP_ABORT(1U) + +#define S_RADM_TRGT1_EOT 2 +#define V_RADM_TRGT1_EOT(x) ((x) << S_RADM_TRGT1_EOT) +#define F_RADM_TRGT1_EOT V_RADM_TRGT1_EOT(1U) + +#define S_RADM_TRGT1_DV_2B 1 +#define V_RADM_TRGT1_DV_2B(x) ((x) << S_RADM_TRGT1_DV_2B) +#define F_RADM_TRGT1_DV_2B V_RADM_TRGT1_DV_2B(1U) + +#define S_RADM_TRGT1_HV_2B 0 +#define V_RADM_TRGT1_HV_2B(x) ((x) << S_RADM_TRGT1_HV_2B) +#define F_RADM_TRGT1_HV_2B V_RADM_TRGT1_HV_2B(1U) + +#define A_PCIE_PDEBUG_REG_0X2C 0x2c + +#define S_STATEMPIO 29 +#define M_STATEMPIO 0x7U +#define V_STATEMPIO(x) ((x) << S_STATEMPIO) +#define G_STATEMPIO(x) (((x) >> S_STATEMPIO) & M_STATEMPIO) + +#define S_STATECPL 25 +#define M_STATECPL 0xfU +#define V_STATECPL(x) ((x) << S_STATECPL) +#define G_STATECPL(x) (((x) >> S_STATECPL) & M_STATECPL) + +#define S_STATEALIN 22 +#define M_STATEALIN 0x7U +#define V_STATEALIN(x) ((x) << S_STATEALIN) +#define G_STATEALIN(x) (((x) >> S_STATEALIN) & M_STATEALIN) + +#define S_STATEPL 19 +#define M_STATEPL 0x7U +#define V_STATEPL(x) ((x) << 
S_STATEPL) +#define G_STATEPL(x) (((x) >> S_STATEPL) & M_STATEPL) + +#define S_STATEMARSP 18 +#define V_STATEMARSP(x) ((x) << S_STATEMARSP) +#define F_STATEMARSP V_STATEMARSP(1U) + +#define S_MA_TAGSINUSE 11 +#define M_MA_TAGSINUSE 0x7fU +#define V_MA_TAGSINUSE(x) ((x) << S_MA_TAGSINUSE) +#define G_MA_TAGSINUSE(x) (((x) >> S_MA_TAGSINUSE) & M_MA_TAGSINUSE) + +#define S_RADM_TRGT1_HSRDY 10 +#define V_RADM_TRGT1_HSRDY(x) ((x) << S_RADM_TRGT1_HSRDY) +#define F_RADM_TRGT1_HSRDY V_RADM_TRGT1_HSRDY(1U) + +#define S_RADM_TRGT1_DSRDY 9 +#define V_RADM_TRGT1_DSRDY(x) ((x) << S_RADM_TRGT1_DSRDY) +#define F_RADM_TRGT1_DSRDY V_RADM_TRGT1_DSRDY(1U) + +#define S_ALIND_REQWRDATAVLD 8 +#define V_ALIND_REQWRDATAVLD(x) ((x) << S_ALIND_REQWRDATAVLD) +#define F_ALIND_REQWRDATAVLD V_ALIND_REQWRDATAVLD(1U) + +#define S_FID_LKUPWRHDRVLD 7 +#define V_FID_LKUPWRHDRVLD(x) ((x) << S_FID_LKUPWRHDRVLD) +#define F_FID_LKUPWRHDRVLD V_FID_LKUPWRHDRVLD(1U) + +#define S_MPIO_WRVLD 6 +#define V_MPIO_WRVLD(x) ((x) << S_MPIO_WRVLD) +#define F_MPIO_WRVLD V_MPIO_WRVLD(1U) + +#define S_TRGT1_RADM_HALT 5 +#define V_TRGT1_RADM_HALT(x) ((x) << S_TRGT1_RADM_HALT) +#define F_TRGT1_RADM_HALT V_TRGT1_RADM_HALT(1U) + +#define S_RADM_TRGT1_DV_2C 4 +#define V_RADM_TRGT1_DV_2C(x) ((x) << S_RADM_TRGT1_DV_2C) +#define F_RADM_TRGT1_DV_2C V_RADM_TRGT1_DV_2C(1U) + +#define S_RADM_TRGT1_DV_2C_2 3 +#define V_RADM_TRGT1_DV_2C_2(x) ((x) << S_RADM_TRGT1_DV_2C_2) +#define F_RADM_TRGT1_DV_2C_2 V_RADM_TRGT1_DV_2C_2(1U) + +#define S_RADM_TRGT1_TLP_ABORT_2C 2 +#define V_RADM_TRGT1_TLP_ABORT_2C(x) ((x) << S_RADM_TRGT1_TLP_ABORT_2C) +#define F_RADM_TRGT1_TLP_ABORT_2C V_RADM_TRGT1_TLP_ABORT_2C(1U) + +#define S_RADM_TRGT1_DLLP_ABORT_2C 1 +#define V_RADM_TRGT1_DLLP_ABORT_2C(x) ((x) << S_RADM_TRGT1_DLLP_ABORT_2C) +#define F_RADM_TRGT1_DLLP_ABORT_2C V_RADM_TRGT1_DLLP_ABORT_2C(1U) + +#define S_RADM_TRGT1_ECRC_ERR_2C 0 +#define V_RADM_TRGT1_ECRC_ERR_2C(x) ((x) << S_RADM_TRGT1_ECRC_ERR_2C) +#define F_RADM_TRGT1_ECRC_ERR_2C 
V_RADM_TRGT1_ECRC_ERR_2C(1U) + +#define A_PCIE_PDEBUG_REG_0X2D 0x2d + +#define S_RADM_TRGT1_HV_2D 31 +#define V_RADM_TRGT1_HV_2D(x) ((x) << S_RADM_TRGT1_HV_2D) +#define F_RADM_TRGT1_HV_2D V_RADM_TRGT1_HV_2D(1U) + +#define S_RADM_TRGT1_DV_2D 30 +#define V_RADM_TRGT1_DV_2D(x) ((x) << S_RADM_TRGT1_DV_2D) +#define F_RADM_TRGT1_DV_2D V_RADM_TRGT1_DV_2D(1U) + +#define S_RADM_TRGT1_HV2 23 +#define M_RADM_TRGT1_HV2 0x7fU +#define V_RADM_TRGT1_HV2(x) ((x) << S_RADM_TRGT1_HV2) +#define G_RADM_TRGT1_HV2(x) (((x) >> S_RADM_TRGT1_HV2) & M_RADM_TRGT1_HV2) + +#define S_RADM_TRGT1_HV3 20 +#define M_RADM_TRGT1_HV3 0x7U +#define V_RADM_TRGT1_HV3(x) ((x) << S_RADM_TRGT1_HV3) +#define G_RADM_TRGT1_HV3(x) (((x) >> S_RADM_TRGT1_HV3) & M_RADM_TRGT1_HV3) + +#define S_RADM_TRGT1_HV4 16 +#define M_RADM_TRGT1_HV4 0xfU +#define V_RADM_TRGT1_HV4(x) ((x) << S_RADM_TRGT1_HV4) +#define G_RADM_TRGT1_HV4(x) (((x) >> S_RADM_TRGT1_HV4) & M_RADM_TRGT1_HV4) + +#define S_RADM_TRGT1_HV5 12 +#define M_RADM_TRGT1_HV5 0xfU +#define V_RADM_TRGT1_HV5(x) ((x) << S_RADM_TRGT1_HV5) +#define G_RADM_TRGT1_HV5(x) (((x) >> S_RADM_TRGT1_HV5) & M_RADM_TRGT1_HV5) + +#define S_RADM_TRGT1_HV6 11 +#define V_RADM_TRGT1_HV6(x) ((x) << S_RADM_TRGT1_HV6) +#define F_RADM_TRGT1_HV6 V_RADM_TRGT1_HV6(1U) + +#define S_RADM_TRGT1_HV7 10 +#define V_RADM_TRGT1_HV7(x) ((x) << S_RADM_TRGT1_HV7) +#define F_RADM_TRGT1_HV7 V_RADM_TRGT1_HV7(1U) + +#define S_RADM_TRGT1_HV8 7 +#define M_RADM_TRGT1_HV8 0x7U +#define V_RADM_TRGT1_HV8(x) ((x) << S_RADM_TRGT1_HV8) +#define G_RADM_TRGT1_HV8(x) (((x) >> S_RADM_TRGT1_HV8) & M_RADM_TRGT1_HV8) + +#define S_RADM_TRGT1_HV9 6 +#define V_RADM_TRGT1_HV9(x) ((x) << S_RADM_TRGT1_HV9) +#define F_RADM_TRGT1_HV9 V_RADM_TRGT1_HV9(1U) + +#define S_RADM_TRGT1_HVA 5 +#define V_RADM_TRGT1_HVA(x) ((x) << S_RADM_TRGT1_HVA) +#define F_RADM_TRGT1_HVA V_RADM_TRGT1_HVA(1U) + +#define S_RADM_TRGT1_DSRDY_2D 4 +#define V_RADM_TRGT1_DSRDY_2D(x) ((x) << S_RADM_TRGT1_DSRDY_2D) +#define F_RADM_TRGT1_DSRDY_2D 
V_RADM_TRGT1_DSRDY_2D(1U) + +#define S_RADM_TRGT1_WRCNT 0 +#define M_RADM_TRGT1_WRCNT 0xfU +#define V_RADM_TRGT1_WRCNT(x) ((x) << S_RADM_TRGT1_WRCNT) +#define G_RADM_TRGT1_WRCNT(x) (((x) >> S_RADM_TRGT1_WRCNT) & M_RADM_TRGT1_WRCNT) + +#define A_PCIE_PDEBUG_REG_0X2E 0x2e + +#define S_RADM_TRGT1_HV_2E 30 +#define M_RADM_TRGT1_HV_2E 0x3U +#define V_RADM_TRGT1_HV_2E(x) ((x) << S_RADM_TRGT1_HV_2E) +#define G_RADM_TRGT1_HV_2E(x) (((x) >> S_RADM_TRGT1_HV_2E) & M_RADM_TRGT1_HV_2E) + +#define S_RADM_TRGT1_HV_2E_2 20 +#define M_RADM_TRGT1_HV_2E_2 0x3ffU +#define V_RADM_TRGT1_HV_2E_2(x) ((x) << S_RADM_TRGT1_HV_2E_2) +#define G_RADM_TRGT1_HV_2E_2(x) (((x) >> S_RADM_TRGT1_HV_2E_2) & M_RADM_TRGT1_HV_2E_2) + +#define S_RADM_TRGT1_HV_WE_3 12 +#define M_RADM_TRGT1_HV_WE_3 0xffU +#define V_RADM_TRGT1_HV_WE_3(x) ((x) << S_RADM_TRGT1_HV_WE_3) +#define G_RADM_TRGT1_HV_WE_3(x) (((x) >> S_RADM_TRGT1_HV_WE_3) & M_RADM_TRGT1_HV_WE_3) + +#define S_ALIN_REQDATAVLD4 8 +#define M_ALIN_REQDATAVLD4 0xfU +#define V_ALIN_REQDATAVLD4(x) ((x) << S_ALIN_REQDATAVLD4) +#define G_ALIN_REQDATAVLD4(x) (((x) >> S_ALIN_REQDATAVLD4) & M_ALIN_REQDATAVLD4) + +#define S_ALIN_REQDATAVLD5 7 +#define V_ALIN_REQDATAVLD5(x) ((x) << S_ALIN_REQDATAVLD5) +#define F_ALIN_REQDATAVLD5 V_ALIN_REQDATAVLD5(1U) + +#define S_ALIN_REQDATAVLD6 6 +#define V_ALIN_REQDATAVLD6(x) ((x) << S_ALIN_REQDATAVLD6) +#define F_ALIN_REQDATAVLD6 V_ALIN_REQDATAVLD6(1U) + +#define S_ALIN_REQDATAVLD7 4 +#define M_ALIN_REQDATAVLD7 0x3U +#define V_ALIN_REQDATAVLD7(x) ((x) << S_ALIN_REQDATAVLD7) +#define G_ALIN_REQDATAVLD7(x) (((x) >> S_ALIN_REQDATAVLD7) & M_ALIN_REQDATAVLD7) + +#define S_ALIN_REQDATAVLD8 3 +#define V_ALIN_REQDATAVLD8(x) ((x) << S_ALIN_REQDATAVLD8) +#define F_ALIN_REQDATAVLD8 V_ALIN_REQDATAVLD8(1U) + +#define S_ALIN_REQDATAVLD9 2 +#define V_ALIN_REQDATAVLD9(x) ((x) << S_ALIN_REQDATAVLD9) +#define F_ALIN_REQDATAVLD9 V_ALIN_REQDATAVLD9(1U) + +#define S_ALIN_REQDATARDY 1 +#define V_ALIN_REQDATARDY(x) ((x) << S_ALIN_REQDATARDY) +#define 
F_ALIN_REQDATARDY V_ALIN_REQDATARDY(1U) + +#define S_ALIN_REQDATAVLDA 0 +#define V_ALIN_REQDATAVLDA(x) ((x) << S_ALIN_REQDATAVLDA) +#define F_ALIN_REQDATAVLDA V_ALIN_REQDATAVLDA(1U) + +#define A_PCIE_PDEBUG_REG_0X2F 0x2f +#define A_PCIE_PDEBUG_REG_0X30 0x30 + +#define S_RADM_TRGT1_HV_30 25 +#define M_RADM_TRGT1_HV_30 0x7fU +#define V_RADM_TRGT1_HV_30(x) ((x) << S_RADM_TRGT1_HV_30) +#define G_RADM_TRGT1_HV_30(x) (((x) >> S_RADM_TRGT1_HV_30) & M_RADM_TRGT1_HV_30) + +#define S_PIO_WRCNT 15 +#define M_PIO_WRCNT 0x3ffU +#define V_PIO_WRCNT(x) ((x) << S_PIO_WRCNT) +#define G_PIO_WRCNT(x) (((x) >> S_PIO_WRCNT) & M_PIO_WRCNT) + +#define S_ALIND_REQWRCNT 12 +#define M_ALIND_REQWRCNT 0x7U +#define V_ALIND_REQWRCNT(x) ((x) << S_ALIND_REQWRCNT) +#define G_ALIND_REQWRCNT(x) (((x) >> S_ALIND_REQWRCNT) & M_ALIND_REQWRCNT) + +#define S_FID_LKUPWRCNT 9 +#define M_FID_LKUPWRCNT 0x7U +#define V_FID_LKUPWRCNT(x) ((x) << S_FID_LKUPWRCNT) +#define G_FID_LKUPWRCNT(x) (((x) >> S_FID_LKUPWRCNT) & M_FID_LKUPWRCNT) + +#define S_ALIND_REQRDDATAVLD 8 +#define V_ALIND_REQRDDATAVLD(x) ((x) << S_ALIND_REQRDDATAVLD) +#define F_ALIND_REQRDDATAVLD V_ALIND_REQRDDATAVLD(1U) + +#define S_ALIND_REQRDDATARDY 7 +#define V_ALIND_REQRDDATARDY(x) ((x) << S_ALIND_REQRDDATARDY) +#define F_ALIND_REQRDDATARDY V_ALIND_REQRDDATARDY(1U) + +#define S_ALIND_REQRDDATAVLD2 6 +#define V_ALIND_REQRDDATAVLD2(x) ((x) << S_ALIND_REQRDDATAVLD2) +#define F_ALIND_REQRDDATAVLD2 V_ALIND_REQRDDATAVLD2(1U) + +#define S_ALIND_REQWRDATAVLD3 3 +#define M_ALIND_REQWRDATAVLD3 0x7U +#define V_ALIND_REQWRDATAVLD3(x) ((x) << S_ALIND_REQWRDATAVLD3) +#define G_ALIND_REQWRDATAVLD3(x) (((x) >> S_ALIND_REQWRDATAVLD3) & M_ALIND_REQWRDATAVLD3) + +#define S_ALIND_REQWRDATAVLD4 2 +#define V_ALIND_REQWRDATAVLD4(x) ((x) << S_ALIND_REQWRDATAVLD4) +#define F_ALIND_REQWRDATAVLD4 V_ALIND_REQWRDATAVLD4(1U) + +#define S_ALIND_REQWRDATARDYOPEN 1 +#define V_ALIND_REQWRDATARDYOPEN(x) ((x) << S_ALIND_REQWRDATARDYOPEN) +#define F_ALIND_REQWRDATARDYOPEN 
V_ALIND_REQWRDATARDYOPEN(1U) + +#define S_ALIND_REQWRDATAVLD5 0 +#define V_ALIND_REQWRDATAVLD5(x) ((x) << S_ALIND_REQWRDATAVLD5) +#define F_ALIND_REQWRDATAVLD5 V_ALIND_REQWRDATAVLD5(1U) + +#define A_PCIE_PDEBUG_REG_0X31 0x31 +#define A_PCIE_PDEBUG_REG_0X32 0x32 +#define A_PCIE_PDEBUG_REG_0X33 0x33 +#define A_PCIE_PDEBUG_REG_0X34 0x34 +#define A_PCIE_PDEBUG_REG_0X35 0x35 + +#define S_T5_MPIO_WRVLD 19 +#define M_T5_MPIO_WRVLD 0x1fffU +#define V_T5_MPIO_WRVLD(x) ((x) << S_T5_MPIO_WRVLD) +#define G_T5_MPIO_WRVLD(x) (((x) >> S_T5_MPIO_WRVLD) & M_T5_MPIO_WRVLD) + +#define S_FID_LKUPRDHDRVLD 18 +#define V_FID_LKUPRDHDRVLD(x) ((x) << S_FID_LKUPRDHDRVLD) +#define F_FID_LKUPRDHDRVLD V_FID_LKUPRDHDRVLD(1U) + +#define S_FID_LKUPRDHDRVLD2 17 +#define V_FID_LKUPRDHDRVLD2(x) ((x) << S_FID_LKUPRDHDRVLD2) +#define F_FID_LKUPRDHDRVLD2 V_FID_LKUPRDHDRVLD2(1U) + +#define S_FID_LKUPRDHDRVLD3 16 +#define V_FID_LKUPRDHDRVLD3(x) ((x) << S_FID_LKUPRDHDRVLD3) +#define F_FID_LKUPRDHDRVLD3 V_FID_LKUPRDHDRVLD3(1U) + +#define S_FID_LKUPRDHDRVLD4 15 +#define V_FID_LKUPRDHDRVLD4(x) ((x) << S_FID_LKUPRDHDRVLD4) +#define F_FID_LKUPRDHDRVLD4 V_FID_LKUPRDHDRVLD4(1U) + +#define S_FID_LKUPRDHDRVLD5 14 +#define V_FID_LKUPRDHDRVLD5(x) ((x) << S_FID_LKUPRDHDRVLD5) +#define F_FID_LKUPRDHDRVLD5 V_FID_LKUPRDHDRVLD5(1U) + +#define S_FID_LKUPRDHDRVLD6 13 +#define V_FID_LKUPRDHDRVLD6(x) ((x) << S_FID_LKUPRDHDRVLD6) +#define F_FID_LKUPRDHDRVLD6 V_FID_LKUPRDHDRVLD6(1U) + +#define S_FID_LKUPRDHDRVLD7 12 +#define V_FID_LKUPRDHDRVLD7(x) ((x) << S_FID_LKUPRDHDRVLD7) +#define F_FID_LKUPRDHDRVLD7 V_FID_LKUPRDHDRVLD7(1U) + +#define S_FID_LKUPRDHDRVLD8 11 +#define V_FID_LKUPRDHDRVLD8(x) ((x) << S_FID_LKUPRDHDRVLD8) +#define F_FID_LKUPRDHDRVLD8 V_FID_LKUPRDHDRVLD8(1U) + +#define S_FID_LKUPRDHDRVLD9 10 +#define V_FID_LKUPRDHDRVLD9(x) ((x) << S_FID_LKUPRDHDRVLD9) +#define F_FID_LKUPRDHDRVLD9 V_FID_LKUPRDHDRVLD9(1U) + +#define S_FID_LKUPRDHDRVLDA 9 +#define V_FID_LKUPRDHDRVLDA(x) ((x) << S_FID_LKUPRDHDRVLDA) +#define 
F_FID_LKUPRDHDRVLDA V_FID_LKUPRDHDRVLDA(1U) + +#define S_FID_LKUPRDHDRVLDB 8 +#define V_FID_LKUPRDHDRVLDB(x) ((x) << S_FID_LKUPRDHDRVLDB) +#define F_FID_LKUPRDHDRVLDB V_FID_LKUPRDHDRVLDB(1U) + +#define S_FID_LKUPRDHDRVLDC 7 +#define V_FID_LKUPRDHDRVLDC(x) ((x) << S_FID_LKUPRDHDRVLDC) +#define F_FID_LKUPRDHDRVLDC V_FID_LKUPRDHDRVLDC(1U) + +#define S_MPIO_WRVLD1 6 +#define V_MPIO_WRVLD1(x) ((x) << S_MPIO_WRVLD1) +#define F_MPIO_WRVLD1 V_MPIO_WRVLD1(1U) + +#define S_MPIO_WRVLD2 5 +#define V_MPIO_WRVLD2(x) ((x) << S_MPIO_WRVLD2) +#define F_MPIO_WRVLD2 V_MPIO_WRVLD2(1U) + +#define S_MPIO_WRVLD3 4 +#define V_MPIO_WRVLD3(x) ((x) << S_MPIO_WRVLD3) +#define F_MPIO_WRVLD3 V_MPIO_WRVLD3(1U) + +#define S_MPIO_WRVLD4 0 +#define M_MPIO_WRVLD4 0xfU +#define V_MPIO_WRVLD4(x) ((x) << S_MPIO_WRVLD4) +#define G_MPIO_WRVLD4(x) (((x) >> S_MPIO_WRVLD4) & M_MPIO_WRVLD4) + +#define A_PCIE_PDEBUG_REG_0X36 0x36 +#define A_PCIE_PDEBUG_REG_0X37 0x37 +#define A_PCIE_PDEBUG_REG_0X38 0x38 +#define A_PCIE_PDEBUG_REG_0X39 0x39 +#define A_PCIE_PDEBUG_REG_0X3A 0x3a + +#define S_CLIENT0_TLP_VFUNC_ACTIVE 31 +#define V_CLIENT0_TLP_VFUNC_ACTIVE(x) ((x) << S_CLIENT0_TLP_VFUNC_ACTIVE) +#define F_CLIENT0_TLP_VFUNC_ACTIVE V_CLIENT0_TLP_VFUNC_ACTIVE(1U) + +#define S_CLIENT0_TLP_VFUNC_NUM 24 +#define M_CLIENT0_TLP_VFUNC_NUM 0x7fU +#define V_CLIENT0_TLP_VFUNC_NUM(x) ((x) << S_CLIENT0_TLP_VFUNC_NUM) +#define G_CLIENT0_TLP_VFUNC_NUM(x) (((x) >> S_CLIENT0_TLP_VFUNC_NUM) & M_CLIENT0_TLP_VFUNC_NUM) + +#define S_CLIENT0_TLP_FUNC_NUM 21 +#define M_CLIENT0_TLP_FUNC_NUM 0x7U +#define V_CLIENT0_TLP_FUNC_NUM(x) ((x) << S_CLIENT0_TLP_FUNC_NUM) +#define G_CLIENT0_TLP_FUNC_NUM(x) (((x) >> S_CLIENT0_TLP_FUNC_NUM) & M_CLIENT0_TLP_FUNC_NUM) + +#define S_CLIENT0_TLP_BYTE_EN 13 +#define M_CLIENT0_TLP_BYTE_EN 0xffU +#define V_CLIENT0_TLP_BYTE_EN(x) ((x) << S_CLIENT0_TLP_BYTE_EN) +#define G_CLIENT0_TLP_BYTE_EN(x) (((x) >> S_CLIENT0_TLP_BYTE_EN) & M_CLIENT0_TLP_BYTE_EN) + +#define S_CLIENT0_TLP_BYTE_LEN 0 +#define 
M_CLIENT0_TLP_BYTE_LEN 0x1fffU +#define V_CLIENT0_TLP_BYTE_LEN(x) ((x) << S_CLIENT0_TLP_BYTE_LEN) +#define G_CLIENT0_TLP_BYTE_LEN(x) (((x) >> S_CLIENT0_TLP_BYTE_LEN) & M_CLIENT0_TLP_BYTE_LEN) + +#define A_PCIE_PDEBUG_REG_0X3B 0x3b + +#define S_XADM_CLIENT0_HALT 31 +#define V_XADM_CLIENT0_HALT(x) ((x) << S_XADM_CLIENT0_HALT) +#define F_XADM_CLIENT0_HALT V_XADM_CLIENT0_HALT(1U) + +#define S_CLIENT0_TLP_DV 30 +#define V_CLIENT0_TLP_DV(x) ((x) << S_CLIENT0_TLP_DV) +#define F_CLIENT0_TLP_DV V_CLIENT0_TLP_DV(1U) + +#define S_CLIENT0_ADDR_ALIGN_EN 29 +#define V_CLIENT0_ADDR_ALIGN_EN(x) ((x) << S_CLIENT0_ADDR_ALIGN_EN) +#define F_CLIENT0_ADDR_ALIGN_EN V_CLIENT0_ADDR_ALIGN_EN(1U) + +#define S_CLIENT0_CPL_BCM 28 +#define V_CLIENT0_CPL_BCM(x) ((x) << S_CLIENT0_CPL_BCM) +#define F_CLIENT0_CPL_BCM V_CLIENT0_CPL_BCM(1U) + +#define S_CLIENT0_TLP_EP 27 +#define V_CLIENT0_TLP_EP(x) ((x) << S_CLIENT0_TLP_EP) +#define F_CLIENT0_TLP_EP V_CLIENT0_TLP_EP(1U) + +#define S_CLIENT0_CPL_STATUS 24 +#define M_CLIENT0_CPL_STATUS 0x7U +#define V_CLIENT0_CPL_STATUS(x) ((x) << S_CLIENT0_CPL_STATUS) +#define G_CLIENT0_CPL_STATUS(x) (((x) >> S_CLIENT0_CPL_STATUS) & M_CLIENT0_CPL_STATUS) + +#define S_CLIENT0_TLP_TD 23 +#define V_CLIENT0_TLP_TD(x) ((x) << S_CLIENT0_TLP_TD) +#define F_CLIENT0_TLP_TD V_CLIENT0_TLP_TD(1U) + +#define S_CLIENT0_TLP_TYPE 18 +#define M_CLIENT0_TLP_TYPE 0x1fU +#define V_CLIENT0_TLP_TYPE(x) ((x) << S_CLIENT0_TLP_TYPE) +#define G_CLIENT0_TLP_TYPE(x) (((x) >> S_CLIENT0_TLP_TYPE) & M_CLIENT0_TLP_TYPE) + +#define S_CLIENT0_TLP_FMT 16 +#define M_CLIENT0_TLP_FMT 0x3U +#define V_CLIENT0_TLP_FMT(x) ((x) << S_CLIENT0_TLP_FMT) +#define G_CLIENT0_TLP_FMT(x) (((x) >> S_CLIENT0_TLP_FMT) & M_CLIENT0_TLP_FMT) + +#define S_CLIENT0_TLP_BAD_EOT 15 +#define V_CLIENT0_TLP_BAD_EOT(x) ((x) << S_CLIENT0_TLP_BAD_EOT) +#define F_CLIENT0_TLP_BAD_EOT V_CLIENT0_TLP_BAD_EOT(1U) + +#define S_CLIENT0_TLP_EOT 14 +#define V_CLIENT0_TLP_EOT(x) ((x) << S_CLIENT0_TLP_EOT) +#define F_CLIENT0_TLP_EOT 
V_CLIENT0_TLP_EOT(1U) + +#define S_CLIENT0_TLP_ATTR 11 +#define M_CLIENT0_TLP_ATTR 0x7U +#define V_CLIENT0_TLP_ATTR(x) ((x) << S_CLIENT0_TLP_ATTR) +#define G_CLIENT0_TLP_ATTR(x) (((x) >> S_CLIENT0_TLP_ATTR) & M_CLIENT0_TLP_ATTR) + +#define S_CLIENT0_TLP_TC 8 +#define M_CLIENT0_TLP_TC 0x7U +#define V_CLIENT0_TLP_TC(x) ((x) << S_CLIENT0_TLP_TC) +#define G_CLIENT0_TLP_TC(x) (((x) >> S_CLIENT0_TLP_TC) & M_CLIENT0_TLP_TC) + +#define S_CLIENT0_TLP_TID 0 +#define M_CLIENT0_TLP_TID 0xffU +#define V_CLIENT0_TLP_TID(x) ((x) << S_CLIENT0_TLP_TID) +#define G_CLIENT0_TLP_TID(x) (((x) >> S_CLIENT0_TLP_TID) & M_CLIENT0_TLP_TID) + +#define A_PCIE_PDEBUG_REG_0X3C 0x3c + +#define S_MEM_RSPRRAVLD 31 +#define V_MEM_RSPRRAVLD(x) ((x) << S_MEM_RSPRRAVLD) +#define F_MEM_RSPRRAVLD V_MEM_RSPRRAVLD(1U) + +#define S_MEM_RSPRRARDY 30 +#define V_MEM_RSPRRARDY(x) ((x) << S_MEM_RSPRRARDY) +#define F_MEM_RSPRRARDY V_MEM_RSPRRARDY(1U) + +#define S_PIO_RSPRRAVLD 29 +#define V_PIO_RSPRRAVLD(x) ((x) << S_PIO_RSPRRAVLD) +#define F_PIO_RSPRRAVLD V_PIO_RSPRRAVLD(1U) + +#define S_PIO_RSPRRARDY 28 +#define V_PIO_RSPRRARDY(x) ((x) << S_PIO_RSPRRARDY) +#define F_PIO_RSPRRARDY V_PIO_RSPRRARDY(1U) + +#define S_MEM_RSPRDVLD 27 +#define V_MEM_RSPRDVLD(x) ((x) << S_MEM_RSPRDVLD) +#define F_MEM_RSPRDVLD V_MEM_RSPRDVLD(1U) + +#define S_MEM_RSPRDRRARDY 26 +#define V_MEM_RSPRDRRARDY(x) ((x) << S_MEM_RSPRDRRARDY) +#define F_MEM_RSPRDRRARDY V_MEM_RSPRDRRARDY(1U) + +#define S_PIO_RSPRDVLD 25 +#define V_PIO_RSPRDVLD(x) ((x) << S_PIO_RSPRDVLD) +#define F_PIO_RSPRDVLD V_PIO_RSPRDVLD(1U) + +#define S_PIO_RSPRDRRARDY 24 +#define V_PIO_RSPRDRRARDY(x) ((x) << S_PIO_RSPRDRRARDY) +#define F_PIO_RSPRDRRARDY V_PIO_RSPRDRRARDY(1U) + +#define S_TGT_TAGQ_RDVLD 16 +#define M_TGT_TAGQ_RDVLD 0xffU +#define V_TGT_TAGQ_RDVLD(x) ((x) << S_TGT_TAGQ_RDVLD) +#define G_TGT_TAGQ_RDVLD(x) (((x) >> S_TGT_TAGQ_RDVLD) & M_TGT_TAGQ_RDVLD) + +#define S_CPLTXNDISABLE 8 +#define M_CPLTXNDISABLE 0xffU +#define V_CPLTXNDISABLE(x) ((x) << 
S_CPLTXNDISABLE) +#define G_CPLTXNDISABLE(x) (((x) >> S_CPLTXNDISABLE) & M_CPLTXNDISABLE) + +#define S_CPLTXNDISABLE2 7 +#define V_CPLTXNDISABLE2(x) ((x) << S_CPLTXNDISABLE2) +#define F_CPLTXNDISABLE2 V_CPLTXNDISABLE2(1U) + +#define S_CLIENT0_TLP_HV 0 +#define M_CLIENT0_TLP_HV 0x7fU +#define V_CLIENT0_TLP_HV(x) ((x) << S_CLIENT0_TLP_HV) +#define G_CLIENT0_TLP_HV(x) (((x) >> S_CLIENT0_TLP_HV) & M_CLIENT0_TLP_HV) + +#define A_PCIE_PDEBUG_REG_0X3D 0x3d +#define A_PCIE_PDEBUG_REG_0X3E 0x3e +#define A_PCIE_PDEBUG_REG_0X3F 0x3f +#define A_PCIE_PDEBUG_REG_0X40 0x40 +#define A_PCIE_PDEBUG_REG_0X41 0x41 +#define A_PCIE_PDEBUG_REG_0X42 0x42 +#define A_PCIE_PDEBUG_REG_0X43 0x43 +#define A_PCIE_PDEBUG_REG_0X44 0x44 +#define A_PCIE_PDEBUG_REG_0X45 0x45 +#define A_PCIE_PDEBUG_REG_0X46 0x46 +#define A_PCIE_PDEBUG_REG_0X47 0x47 +#define A_PCIE_PDEBUG_REG_0X48 0x48 +#define A_PCIE_PDEBUG_REG_0X49 0x49 +#define A_PCIE_PDEBUG_REG_0X4A 0x4a +#define A_PCIE_PDEBUG_REG_0X4B 0x4b +#define A_PCIE_PDEBUG_REG_0X4C 0x4c +#define A_PCIE_PDEBUG_REG_0X4D 0x4d +#define A_PCIE_PDEBUG_REG_0X4E 0x4e +#define A_PCIE_PDEBUG_REG_0X4F 0x4f +#define A_PCIE_PDEBUG_REG_0X50 0x50 +#define A_PCIE_CDEBUG_REG_0X0 0x0 +#define A_PCIE_CDEBUG_REG_0X1 0x1 +#define A_PCIE_CDEBUG_REG_0X2 0x2 + +#define S_FLR_REQVLD 31 +#define V_FLR_REQVLD(x) ((x) << S_FLR_REQVLD) +#define F_FLR_REQVLD V_FLR_REQVLD(1U) + +#define S_D_RSPVLD 28 +#define M_D_RSPVLD 0x7U +#define V_D_RSPVLD(x) ((x) << S_D_RSPVLD) +#define G_D_RSPVLD(x) (((x) >> S_D_RSPVLD) & M_D_RSPVLD) + +#define S_D_RSPVLD2 27 +#define V_D_RSPVLD2(x) ((x) << S_D_RSPVLD2) +#define F_D_RSPVLD2 V_D_RSPVLD2(1U) + +#define S_D_RSPVLD3 26 +#define V_D_RSPVLD3(x) ((x) << S_D_RSPVLD3) +#define F_D_RSPVLD3 V_D_RSPVLD3(1U) + +#define S_D_RSPVLD4 25 +#define V_D_RSPVLD4(x) ((x) << S_D_RSPVLD4) +#define F_D_RSPVLD4 V_D_RSPVLD4(1U) + +#define S_D_RSPVLD5 24 +#define V_D_RSPVLD5(x) ((x) << S_D_RSPVLD5) +#define F_D_RSPVLD5 V_D_RSPVLD5(1U) + +#define S_D_RSPVLD6 20 +#define 
M_D_RSPVLD6 0xfU +#define V_D_RSPVLD6(x) ((x) << S_D_RSPVLD6) +#define G_D_RSPVLD6(x) (((x) >> S_D_RSPVLD6) & M_D_RSPVLD6) + +#define S_D_RSPAFULL 16 +#define M_D_RSPAFULL 0xfU +#define V_D_RSPAFULL(x) ((x) << S_D_RSPAFULL) +#define G_D_RSPAFULL(x) (((x) >> S_D_RSPAFULL) & M_D_RSPAFULL) + +#define S_D_RDREQVLD 12 +#define M_D_RDREQVLD 0xfU +#define V_D_RDREQVLD(x) ((x) << S_D_RDREQVLD) +#define G_D_RDREQVLD(x) (((x) >> S_D_RDREQVLD) & M_D_RDREQVLD) + +#define S_D_RDREQAFULL 8 +#define M_D_RDREQAFULL 0xfU +#define V_D_RDREQAFULL(x) ((x) << S_D_RDREQAFULL) +#define G_D_RDREQAFULL(x) (((x) >> S_D_RDREQAFULL) & M_D_RDREQAFULL) + +#define S_D_WRREQVLD 4 +#define M_D_WRREQVLD 0xfU +#define V_D_WRREQVLD(x) ((x) << S_D_WRREQVLD) +#define G_D_WRREQVLD(x) (((x) >> S_D_WRREQVLD) & M_D_WRREQVLD) + +#define S_D_WRREQAFULL 0 +#define M_D_WRREQAFULL 0xfU +#define V_D_WRREQAFULL(x) ((x) << S_D_WRREQAFULL) +#define G_D_WRREQAFULL(x) (((x) >> S_D_WRREQAFULL) & M_D_WRREQAFULL) + +#define A_PCIE_CDEBUG_REG_0X3 0x3 + +#define S_C_REQVLD 19 +#define M_C_REQVLD 0x1fffU +#define V_C_REQVLD(x) ((x) << S_C_REQVLD) +#define G_C_REQVLD(x) (((x) >> S_C_REQVLD) & M_C_REQVLD) + +#define S_C_RSPVLD2 16 +#define M_C_RSPVLD2 0x7U +#define V_C_RSPVLD2(x) ((x) << S_C_RSPVLD2) +#define G_C_RSPVLD2(x) (((x) >> S_C_RSPVLD2) & M_C_RSPVLD2) + +#define S_C_RSPVLD3 15 +#define V_C_RSPVLD3(x) ((x) << S_C_RSPVLD3) +#define F_C_RSPVLD3 V_C_RSPVLD3(1U) + +#define S_C_RSPVLD4 14 +#define V_C_RSPVLD4(x) ((x) << S_C_RSPVLD4) +#define F_C_RSPVLD4 V_C_RSPVLD4(1U) + +#define S_C_RSPVLD5 13 +#define V_C_RSPVLD5(x) ((x) << S_C_RSPVLD5) +#define F_C_RSPVLD5 V_C_RSPVLD5(1U) + +#define S_C_RSPVLD6 12 +#define V_C_RSPVLD6(x) ((x) << S_C_RSPVLD6) +#define F_C_RSPVLD6 V_C_RSPVLD6(1U) + +#define S_C_RSPVLD7 9 +#define M_C_RSPVLD7 0x7U +#define V_C_RSPVLD7(x) ((x) << S_C_RSPVLD7) +#define G_C_RSPVLD7(x) (((x) >> S_C_RSPVLD7) & M_C_RSPVLD7) + +#define S_C_RSPAFULL 6 +#define M_C_RSPAFULL 0x7U +#define V_C_RSPAFULL(x) ((x) << 
S_C_RSPAFULL) +#define G_C_RSPAFULL(x) (((x) >> S_C_RSPAFULL) & M_C_RSPAFULL) + +#define S_C_REQVLD8 3 +#define M_C_REQVLD8 0x7U +#define V_C_REQVLD8(x) ((x) << S_C_REQVLD8) +#define G_C_REQVLD8(x) (((x) >> S_C_REQVLD8) & M_C_REQVLD8) + +#define S_C_REQAFULL 0 +#define M_C_REQAFULL 0x7U +#define V_C_REQAFULL(x) ((x) << S_C_REQAFULL) +#define G_C_REQAFULL(x) (((x) >> S_C_REQAFULL) & M_C_REQAFULL) + +#define A_PCIE_CDEBUG_REG_0X4 0x4 + +#define S_H_REQVLD 7 +#define M_H_REQVLD 0x1ffffffU +#define V_H_REQVLD(x) ((x) << S_H_REQVLD) +#define G_H_REQVLD(x) (((x) >> S_H_REQVLD) & M_H_REQVLD) + +#define S_H_RSPVLD 6 +#define V_H_RSPVLD(x) ((x) << S_H_RSPVLD) +#define F_H_RSPVLD V_H_RSPVLD(1U) + +#define S_H_RSPVLD2 5 +#define V_H_RSPVLD2(x) ((x) << S_H_RSPVLD2) +#define F_H_RSPVLD2 V_H_RSPVLD2(1U) + +#define S_H_RSPVLD3 4 +#define V_H_RSPVLD3(x) ((x) << S_H_RSPVLD3) +#define F_H_RSPVLD3 V_H_RSPVLD3(1U) + +#define S_H_RSPVLD4 3 +#define V_H_RSPVLD4(x) ((x) << S_H_RSPVLD4) +#define F_H_RSPVLD4 V_H_RSPVLD4(1U) + +#define S_H_RSPAFULL 2 +#define V_H_RSPAFULL(x) ((x) << S_H_RSPAFULL) +#define F_H_RSPAFULL V_H_RSPAFULL(1U) + +#define S_H_REQVLD2 1 +#define V_H_REQVLD2(x) ((x) << S_H_REQVLD2) +#define F_H_REQVLD2 V_H_REQVLD2(1U) + +#define S_H_REQAFULL 0 +#define V_H_REQAFULL(x) ((x) << S_H_REQAFULL) +#define F_H_REQAFULL V_H_REQAFULL(1U) + +#define A_PCIE_CDEBUG_REG_0X5 0x5 + +#define S_ER_RSPVLD 16 +#define M_ER_RSPVLD 0xffffU +#define V_ER_RSPVLD(x) ((x) << S_ER_RSPVLD) +#define G_ER_RSPVLD(x) (((x) >> S_ER_RSPVLD) & M_ER_RSPVLD) + +#define S_ER_REQVLD2 5 +#define M_ER_REQVLD2 0x7ffU +#define V_ER_REQVLD2(x) ((x) << S_ER_REQVLD2) +#define G_ER_REQVLD2(x) (((x) >> S_ER_REQVLD2) & M_ER_REQVLD2) + +#define S_ER_REQVLD3 2 +#define M_ER_REQVLD3 0x7U +#define V_ER_REQVLD3(x) ((x) << S_ER_REQVLD3) +#define G_ER_REQVLD3(x) (((x) >> S_ER_REQVLD3) & M_ER_REQVLD3) + +#define S_ER_RSPVLD4 1 +#define V_ER_RSPVLD4(x) ((x) << S_ER_RSPVLD4) +#define F_ER_RSPVLD4 V_ER_RSPVLD4(1U) + +#define 
S_ER_REQVLD5 0 +#define V_ER_REQVLD5(x) ((x) << S_ER_REQVLD5) +#define F_ER_REQVLD5 V_ER_REQVLD5(1U) + +#define A_PCIE_CDEBUG_REG_0X6 0x6 + +#define S_PL_BAR2_REQVLD 4 +#define M_PL_BAR2_REQVLD 0xfffffffU +#define V_PL_BAR2_REQVLD(x) ((x) << S_PL_BAR2_REQVLD) +#define G_PL_BAR2_REQVLD(x) (((x) >> S_PL_BAR2_REQVLD) & M_PL_BAR2_REQVLD) + +#define S_PL_BAR2_REQVLD2 3 +#define V_PL_BAR2_REQVLD2(x) ((x) << S_PL_BAR2_REQVLD2) +#define F_PL_BAR2_REQVLD2 V_PL_BAR2_REQVLD2(1U) + +#define S_PL_BAR2_REQVLDE 2 +#define V_PL_BAR2_REQVLDE(x) ((x) << S_PL_BAR2_REQVLDE) +#define F_PL_BAR2_REQVLDE V_PL_BAR2_REQVLDE(1U) + +#define S_PL_BAR2_REQFULL 1 +#define V_PL_BAR2_REQFULL(x) ((x) << S_PL_BAR2_REQFULL) +#define F_PL_BAR2_REQFULL V_PL_BAR2_REQFULL(1U) + +#define S_PL_BAR2_REQVLD4 0 +#define V_PL_BAR2_REQVLD4(x) ((x) << S_PL_BAR2_REQVLD4) +#define F_PL_BAR2_REQVLD4 V_PL_BAR2_REQVLD4(1U) + +#define A_PCIE_CDEBUG_REG_0X7 0x7 +#define A_PCIE_CDEBUG_REG_0X8 0x8 +#define A_PCIE_CDEBUG_REG_0X9 0x9 +#define A_PCIE_CDEBUG_REG_0XA 0xa + +#define S_VPD_RSPVLD 20 +#define M_VPD_RSPVLD 0xfffU +#define V_VPD_RSPVLD(x) ((x) << S_VPD_RSPVLD) +#define G_VPD_RSPVLD(x) (((x) >> S_VPD_RSPVLD) & M_VPD_RSPVLD) + +#define S_VPD_REQVLD2 9 +#define M_VPD_REQVLD2 0x7ffU +#define V_VPD_REQVLD2(x) ((x) << S_VPD_REQVLD2) +#define G_VPD_REQVLD2(x) (((x) >> S_VPD_REQVLD2) & M_VPD_REQVLD2) + +#define S_VPD_REQVLD3 6 +#define M_VPD_REQVLD3 0x7U +#define V_VPD_REQVLD3(x) ((x) << S_VPD_REQVLD3) +#define G_VPD_REQVLD3(x) (((x) >> S_VPD_REQVLD3) & M_VPD_REQVLD3) + +#define S_VPD_REQVLD4 5 +#define V_VPD_REQVLD4(x) ((x) << S_VPD_REQVLD4) +#define F_VPD_REQVLD4 V_VPD_REQVLD4(1U) + +#define S_VPD_REQVLD5 3 +#define M_VPD_REQVLD5 0x3U +#define V_VPD_REQVLD5(x) ((x) << S_VPD_REQVLD5) +#define G_VPD_REQVLD5(x) (((x) >> S_VPD_REQVLD5) & M_VPD_REQVLD5) + +#define S_VPD_RSPVLD2 2 +#define V_VPD_RSPVLD2(x) ((x) << S_VPD_RSPVLD2) +#define F_VPD_RSPVLD2 V_VPD_RSPVLD2(1U) + +#define S_VPD_RSPVLD3 1 +#define V_VPD_RSPVLD3(x) ((x) 
<< S_VPD_RSPVLD3) +#define F_VPD_RSPVLD3 V_VPD_RSPVLD3(1U) + +#define S_VPD_REQVLD6 0 +#define V_VPD_REQVLD6(x) ((x) << S_VPD_REQVLD6) +#define F_VPD_REQVLD6 V_VPD_REQVLD6(1U) + +#define A_PCIE_CDEBUG_REG_0XB 0xb + +#define S_MA_REQDATAVLD 28 +#define M_MA_REQDATAVLD 0xfU +#define V_MA_REQDATAVLD(x) ((x) << S_MA_REQDATAVLD) +#define G_MA_REQDATAVLD(x) (((x) >> S_MA_REQDATAVLD) & M_MA_REQDATAVLD) + +#define S_MA_REQADDRVLD 27 +#define V_MA_REQADDRVLD(x) ((x) << S_MA_REQADDRVLD) +#define F_MA_REQADDRVLD V_MA_REQADDRVLD(1U) + +#define S_MA_REQADDRVLD2 26 +#define V_MA_REQADDRVLD2(x) ((x) << S_MA_REQADDRVLD2) +#define F_MA_REQADDRVLD2 V_MA_REQADDRVLD2(1U) + +#define S_MA_RSPDATAVLD2 22 +#define M_MA_RSPDATAVLD2 0xfU +#define V_MA_RSPDATAVLD2(x) ((x) << S_MA_RSPDATAVLD2) +#define G_MA_RSPDATAVLD2(x) (((x) >> S_MA_RSPDATAVLD2) & M_MA_RSPDATAVLD2) + +#define S_MA_REQADDRVLD3 20 +#define M_MA_REQADDRVLD3 0x3U +#define V_MA_REQADDRVLD3(x) ((x) << S_MA_REQADDRVLD3) +#define G_MA_REQADDRVLD3(x) (((x) >> S_MA_REQADDRVLD3) & M_MA_REQADDRVLD3) + +#define S_MA_REQADDRVLD4 4 +#define M_MA_REQADDRVLD4 0xffffU +#define V_MA_REQADDRVLD4(x) ((x) << S_MA_REQADDRVLD4) +#define G_MA_REQADDRVLD4(x) (((x) >> S_MA_REQADDRVLD4) & M_MA_REQADDRVLD4) + +#define S_MA_REQADDRVLD5 3 +#define V_MA_REQADDRVLD5(x) ((x) << S_MA_REQADDRVLD5) +#define F_MA_REQADDRVLD5 V_MA_REQADDRVLD5(1U) + +#define S_MA_REQADDRVLD6 2 +#define V_MA_REQADDRVLD6(x) ((x) << S_MA_REQADDRVLD6) +#define F_MA_REQADDRVLD6 V_MA_REQADDRVLD6(1U) + +#define S_MA_REQADDRRDY 1 +#define V_MA_REQADDRRDY(x) ((x) << S_MA_REQADDRRDY) +#define F_MA_REQADDRRDY V_MA_REQADDRRDY(1U) + +#define S_MA_REQADDRVLD7 0 +#define V_MA_REQADDRVLD7(x) ((x) << S_MA_REQADDRVLD7) +#define F_MA_REQADDRVLD7 V_MA_REQADDRVLD7(1U) + +#define A_PCIE_CDEBUG_REG_0XC 0xc +#define A_PCIE_CDEBUG_REG_0XD 0xd +#define A_PCIE_CDEBUG_REG_0XE 0xe +#define A_PCIE_CDEBUG_REG_0XF 0xf +#define A_PCIE_CDEBUG_REG_0X10 0x10 +#define A_PCIE_CDEBUG_REG_0X11 0x11 +#define 
A_PCIE_CDEBUG_REG_0X12 0x12 +#define A_PCIE_CDEBUG_REG_0X13 0x13 +#define A_PCIE_CDEBUG_REG_0X14 0x14 +#define A_PCIE_CDEBUG_REG_0X15 0x15 + +#define S_PLM_REQVLD 19 +#define M_PLM_REQVLD 0x1fffU +#define V_PLM_REQVLD(x) ((x) << S_PLM_REQVLD) +#define G_PLM_REQVLD(x) (((x) >> S_PLM_REQVLD) & M_PLM_REQVLD) + +#define S_PLM_REQVLD2 18 +#define V_PLM_REQVLD2(x) ((x) << S_PLM_REQVLD2) +#define F_PLM_REQVLD2 V_PLM_REQVLD2(1U) + +#define S_PLM_RSPVLD3 17 +#define V_PLM_RSPVLD3(x) ((x) << S_PLM_RSPVLD3) +#define F_PLM_RSPVLD3 V_PLM_RSPVLD3(1U) + +#define S_PLM_REQVLD4 16 +#define V_PLM_REQVLD4(x) ((x) << S_PLM_REQVLD4) +#define F_PLM_REQVLD4 V_PLM_REQVLD4(1U) + +#define S_PLM_REQVLD5 15 +#define V_PLM_REQVLD5(x) ((x) << S_PLM_REQVLD5) +#define F_PLM_REQVLD5 V_PLM_REQVLD5(1U) + +#define S_PLM_REQVLD6 14 +#define V_PLM_REQVLD6(x) ((x) << S_PLM_REQVLD6) +#define F_PLM_REQVLD6 V_PLM_REQVLD6(1U) + +#define S_PLM_REQVLD7 13 +#define V_PLM_REQVLD7(x) ((x) << S_PLM_REQVLD7) +#define F_PLM_REQVLD7 V_PLM_REQVLD7(1U) + +#define S_PLM_REQVLD8 12 +#define V_PLM_REQVLD8(x) ((x) << S_PLM_REQVLD8) +#define F_PLM_REQVLD8 V_PLM_REQVLD8(1U) + +#define S_PLM_REQVLD9 4 +#define M_PLM_REQVLD9 0xffU +#define V_PLM_REQVLD9(x) ((x) << S_PLM_REQVLD9) +#define G_PLM_REQVLD9(x) (((x) >> S_PLM_REQVLD9) & M_PLM_REQVLD9) + +#define S_PLM_REQVLDA 1 +#define M_PLM_REQVLDA 0x7U +#define V_PLM_REQVLDA(x) ((x) << S_PLM_REQVLDA) +#define G_PLM_REQVLDA(x) (((x) >> S_PLM_REQVLDA) & M_PLM_REQVLDA) + +#define S_PLM_REQVLDB 0 +#define V_PLM_REQVLDB(x) ((x) << S_PLM_REQVLDB) +#define F_PLM_REQVLDB V_PLM_REQVLDB(1U) + +#define A_PCIE_CDEBUG_REG_0X16 0x16 +#define A_PCIE_CDEBUG_REG_0X17 0x17 +#define A_PCIE_CDEBUG_REG_0X18 0x18 +#define A_PCIE_CDEBUG_REG_0X19 0x19 +#define A_PCIE_CDEBUG_REG_0X1A 0x1a +#define A_PCIE_CDEBUG_REG_0X1B 0x1b +#define A_PCIE_CDEBUG_REG_0X1C 0x1c +#define A_PCIE_CDEBUG_REG_0X1D 0x1d +#define A_PCIE_CDEBUG_REG_0X1E 0x1e +#define A_PCIE_CDEBUG_REG_0X1F 0x1f +#define A_PCIE_CDEBUG_REG_0X20 
0x20 +#define A_PCIE_CDEBUG_REG_0X21 0x21 +#define A_PCIE_CDEBUG_REG_0X22 0x22 +#define A_PCIE_CDEBUG_REG_0X23 0x23 +#define A_PCIE_CDEBUG_REG_0X24 0x24 +#define A_PCIE_CDEBUG_REG_0X25 0x25 +#define A_PCIE_CDEBUG_REG_0X26 0x26 +#define A_PCIE_CDEBUG_REG_0X27 0x27 +#define A_PCIE_CDEBUG_REG_0X28 0x28 +#define A_PCIE_CDEBUG_REG_0X29 0x29 +#define A_PCIE_CDEBUG_REG_0X2A 0x2a +#define A_PCIE_CDEBUG_REG_0X2B 0x2b +#define A_PCIE_CDEBUG_REG_0X2C 0x2c +#define A_PCIE_CDEBUG_REG_0X2D 0x2d +#define A_PCIE_CDEBUG_REG_0X2E 0x2e +#define A_PCIE_CDEBUG_REG_0X2F 0x2f +#define A_PCIE_CDEBUG_REG_0X30 0x30 +#define A_PCIE_CDEBUG_REG_0X31 0x31 +#define A_PCIE_CDEBUG_REG_0X32 0x32 +#define A_PCIE_CDEBUG_REG_0X33 0x33 +#define A_PCIE_CDEBUG_REG_0X34 0x34 +#define A_PCIE_CDEBUG_REG_0X35 0x35 +#define A_PCIE_CDEBUG_REG_0X36 0x36 +#define A_PCIE_CDEBUG_REG_0X37 0x37 /* registers for module DBG */ #define DBG_BASE_ADDR 0x6000 @@ -7998,6 +12379,11 @@ #define V_T5_P_OCLK_MUXSEL(x) ((x) << S_T5_P_OCLK_MUXSEL) #define G_T5_P_OCLK_MUXSEL(x) (((x) >> S_T5_P_OCLK_MUXSEL) & M_T5_P_OCLK_MUXSEL) +#define S_T6_P_OCLK_MUXSEL 13 +#define M_T6_P_OCLK_MUXSEL 0xfU +#define V_T6_P_OCLK_MUXSEL(x) ((x) << S_T6_P_OCLK_MUXSEL) +#define G_T6_P_OCLK_MUXSEL(x) (((x) >> S_T6_P_OCLK_MUXSEL) & M_T6_P_OCLK_MUXSEL) + #define A_DBG_TRACE0_CONF_COMPREG0 0x6060 #define A_DBG_TRACE0_CONF_COMPREG1 0x6064 #define A_DBG_TRACE1_CONF_COMPREG0 0x6068 @@ -8071,6 +12457,26 @@ #define V_RD_EN0(x) ((x) << S_RD_EN0) #define F_RD_EN0 V_RD_EN0(1U) +#define S_T5_RD_ADDR1 11 +#define M_T5_RD_ADDR1 0x1ffU +#define V_T5_RD_ADDR1(x) ((x) << S_T5_RD_ADDR1) +#define G_T5_RD_ADDR1(x) (((x) >> S_T5_RD_ADDR1) & M_T5_RD_ADDR1) + +#define S_T5_RD_ADDR0 2 +#define M_T5_RD_ADDR0 0x1ffU +#define V_T5_RD_ADDR0(x) ((x) << S_T5_RD_ADDR0) +#define G_T5_RD_ADDR0(x) (((x) >> S_T5_RD_ADDR0) & M_T5_RD_ADDR0) + +#define S_T6_RD_ADDR1 11 +#define M_T6_RD_ADDR1 0x1ffU +#define V_T6_RD_ADDR1(x) ((x) << S_T6_RD_ADDR1) +#define G_T6_RD_ADDR1(x) (((x) >> 
S_T6_RD_ADDR1) & M_T6_RD_ADDR1) + +#define S_T6_RD_ADDR0 2 +#define M_T6_RD_ADDR0 0x1ffU +#define V_T6_RD_ADDR0(x) ((x) << S_T6_RD_ADDR0) +#define G_T6_RD_ADDR0(x) (((x) >> S_T6_RD_ADDR0) & M_T6_RD_ADDR0) + #define A_DBG_TRACE_WRADDR 0x6090 #define S_WR_POINTER_ADDR1 16 @@ -8083,6 +12489,26 @@ #define V_WR_POINTER_ADDR0(x) ((x) << S_WR_POINTER_ADDR0) #define G_WR_POINTER_ADDR0(x) (((x) >> S_WR_POINTER_ADDR0) & M_WR_POINTER_ADDR0) +#define S_T5_WR_POINTER_ADDR1 16 +#define M_T5_WR_POINTER_ADDR1 0x1ffU +#define V_T5_WR_POINTER_ADDR1(x) ((x) << S_T5_WR_POINTER_ADDR1) +#define G_T5_WR_POINTER_ADDR1(x) (((x) >> S_T5_WR_POINTER_ADDR1) & M_T5_WR_POINTER_ADDR1) + +#define S_T5_WR_POINTER_ADDR0 0 +#define M_T5_WR_POINTER_ADDR0 0x1ffU +#define V_T5_WR_POINTER_ADDR0(x) ((x) << S_T5_WR_POINTER_ADDR0) +#define G_T5_WR_POINTER_ADDR0(x) (((x) >> S_T5_WR_POINTER_ADDR0) & M_T5_WR_POINTER_ADDR0) + +#define S_T6_WR_POINTER_ADDR1 16 +#define M_T6_WR_POINTER_ADDR1 0x1ffU +#define V_T6_WR_POINTER_ADDR1(x) ((x) << S_T6_WR_POINTER_ADDR1) +#define G_T6_WR_POINTER_ADDR1(x) (((x) >> S_T6_WR_POINTER_ADDR1) & M_T6_WR_POINTER_ADDR1) + +#define S_T6_WR_POINTER_ADDR0 0 +#define M_T6_WR_POINTER_ADDR0 0x1ffU +#define V_T6_WR_POINTER_ADDR0(x) ((x) << S_T6_WR_POINTER_ADDR0) +#define G_T6_WR_POINTER_ADDR0(x) (((x) >> S_T6_WR_POINTER_ADDR0) & M_T6_WR_POINTER_ADDR0) + #define A_DBG_TRACE0_DATA_OUT 0x6094 #define A_DBG_TRACE1_DATA_OUT 0x6098 #define A_DBG_FUSE_SENSE_DONE 0x609c @@ -8137,6 +12563,18 @@ #define V_TVSENSE_RATIO(x) ((x) << S_TVSENSE_RATIO) #define G_TVSENSE_RATIO(x) (((x) >> S_TVSENSE_RATIO) & M_TVSENSE_RATIO) +#define S_T6_TVSENSE_SLEEP 11 +#define V_T6_TVSENSE_SLEEP(x) ((x) << S_T6_TVSENSE_SLEEP) +#define F_T6_TVSENSE_SLEEP V_T6_TVSENSE_SLEEP(1U) + +#define S_T6_TVSENSE_SENSV 10 +#define V_T6_TVSENSE_SENSV(x) ((x) << S_T6_TVSENSE_SENSV) +#define F_T6_TVSENSE_SENSV V_T6_TVSENSE_SENSV(1U) + +#define S_T6_TVSENSE_RST 9 +#define V_T6_TVSENSE_RST(x) ((x) << S_T6_TVSENSE_RST) +#define 
F_T6_TVSENSE_RST V_T6_TVSENSE_RST(1U) + #define A_DBG_CUST_EFUSE_OUT_EN 0x60ac #define A_DBG_CUST_EFUSE_SEL1_EN 0x60b0 #define A_DBG_CUST_EFUSE_SEL2_EN 0x60b4 @@ -8188,6 +12626,18 @@ #define V_T5_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_M_PLL_FFSLEWRATE) #define G_T5_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_M_PLL_FFSLEWRATE) & M_T5_STATIC_M_PLL_FFSLEWRATE) +#define A_DBG_STATIC_M_PLL_CONF1 0x60b8 + +#define S_STATIC_M_PLL_MULTFRAC 8 +#define M_STATIC_M_PLL_MULTFRAC 0xffffffU +#define V_STATIC_M_PLL_MULTFRAC(x) ((x) << S_STATIC_M_PLL_MULTFRAC) +#define G_STATIC_M_PLL_MULTFRAC(x) (((x) >> S_STATIC_M_PLL_MULTFRAC) & M_STATIC_M_PLL_MULTFRAC) + +#define S_STATIC_M_PLL_FFSLEWRATE 0 +#define M_STATIC_M_PLL_FFSLEWRATE 0xffU +#define V_STATIC_M_PLL_FFSLEWRATE(x) ((x) << S_STATIC_M_PLL_FFSLEWRATE) +#define G_STATIC_M_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_M_PLL_FFSLEWRATE) & M_STATIC_M_PLL_FFSLEWRATE) + #define A_DBG_T5_STATIC_M_PLL_CONF2 0x60bc #define S_T5_STATIC_M_PLL_DCO_BYPASS 23 @@ -8224,6 +12674,47 @@ #define V_T5_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_M_PLL_LOCKTUNE) #define G_T5_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_M_PLL_LOCKTUNE) & M_T5_STATIC_M_PLL_LOCKTUNE) +#define A_DBG_STATIC_M_PLL_CONF2 0x60bc + +#define S_T6_STATIC_M_PLL_PREDIV 24 +#define M_T6_STATIC_M_PLL_PREDIV 0x3fU +#define V_T6_STATIC_M_PLL_PREDIV(x) ((x) << S_T6_STATIC_M_PLL_PREDIV) +#define G_T6_STATIC_M_PLL_PREDIV(x) (((x) >> S_T6_STATIC_M_PLL_PREDIV) & M_T6_STATIC_M_PLL_PREDIV) + +#define S_STATIC_M_PLL_DCO_BYPASS 23 +#define V_STATIC_M_PLL_DCO_BYPASS(x) ((x) << S_STATIC_M_PLL_DCO_BYPASS) +#define F_STATIC_M_PLL_DCO_BYPASS V_STATIC_M_PLL_DCO_BYPASS(1U) + +#define S_STATIC_M_PLL_SDORDER 21 +#define M_STATIC_M_PLL_SDORDER 0x3U +#define V_STATIC_M_PLL_SDORDER(x) ((x) << S_STATIC_M_PLL_SDORDER) +#define G_STATIC_M_PLL_SDORDER(x) (((x) >> S_STATIC_M_PLL_SDORDER) & M_STATIC_M_PLL_SDORDER) + +#define S_STATIC_M_PLL_FFENABLE 20 +#define V_STATIC_M_PLL_FFENABLE(x) ((x) << 
S_STATIC_M_PLL_FFENABLE) +#define F_STATIC_M_PLL_FFENABLE V_STATIC_M_PLL_FFENABLE(1U) + +#define S_STATIC_M_PLL_STOPCLKB 19 +#define V_STATIC_M_PLL_STOPCLKB(x) ((x) << S_STATIC_M_PLL_STOPCLKB) +#define F_STATIC_M_PLL_STOPCLKB V_STATIC_M_PLL_STOPCLKB(1U) + +#define S_STATIC_M_PLL_STOPCLKA 18 +#define V_STATIC_M_PLL_STOPCLKA(x) ((x) << S_STATIC_M_PLL_STOPCLKA) +#define F_STATIC_M_PLL_STOPCLKA V_STATIC_M_PLL_STOPCLKA(1U) + +#define S_T6_STATIC_M_PLL_SLEEP 17 +#define V_T6_STATIC_M_PLL_SLEEP(x) ((x) << S_T6_STATIC_M_PLL_SLEEP) +#define F_T6_STATIC_M_PLL_SLEEP V_T6_STATIC_M_PLL_SLEEP(1U) + +#define S_T6_STATIC_M_PLL_BYPASS 16 +#define V_T6_STATIC_M_PLL_BYPASS(x) ((x) << S_T6_STATIC_M_PLL_BYPASS) +#define F_T6_STATIC_M_PLL_BYPASS V_T6_STATIC_M_PLL_BYPASS(1U) + +#define S_STATIC_M_PLL_LOCKTUNE 0 +#define M_STATIC_M_PLL_LOCKTUNE 0x1fU +#define V_STATIC_M_PLL_LOCKTUNE(x) ((x) << S_STATIC_M_PLL_LOCKTUNE) +#define G_STATIC_M_PLL_LOCKTUNE(x) (((x) >> S_STATIC_M_PLL_LOCKTUNE) & M_STATIC_M_PLL_LOCKTUNE) + #define A_DBG_T5_STATIC_M_PLL_CONF3 0x60c0 #define S_T5_STATIC_M_PLL_MULTPRE 30 @@ -8256,7 +12747,39 @@ #define V_T5_STATIC_M_PLL_RANGEA(x) ((x) << S_T5_STATIC_M_PLL_RANGEA) #define G_T5_STATIC_M_PLL_RANGEA(x) (((x) >> S_T5_STATIC_M_PLL_RANGEA) & M_T5_STATIC_M_PLL_RANGEA) +#define A_DBG_STATIC_M_PLL_CONF3 0x60c0 + +#define S_STATIC_M_PLL_MULTPRE 30 +#define M_STATIC_M_PLL_MULTPRE 0x3U +#define V_STATIC_M_PLL_MULTPRE(x) ((x) << S_STATIC_M_PLL_MULTPRE) +#define G_STATIC_M_PLL_MULTPRE(x) (((x) >> S_STATIC_M_PLL_MULTPRE) & M_STATIC_M_PLL_MULTPRE) + +#define S_STATIC_M_PLL_LOCKSEL 28 +#define V_STATIC_M_PLL_LOCKSEL(x) ((x) << S_STATIC_M_PLL_LOCKSEL) +#define F_STATIC_M_PLL_LOCKSEL V_STATIC_M_PLL_LOCKSEL(1U) + +#define S_STATIC_M_PLL_FFTUNE 12 +#define M_STATIC_M_PLL_FFTUNE 0xffffU +#define V_STATIC_M_PLL_FFTUNE(x) ((x) << S_STATIC_M_PLL_FFTUNE) +#define G_STATIC_M_PLL_FFTUNE(x) (((x) >> S_STATIC_M_PLL_FFTUNE) & M_STATIC_M_PLL_FFTUNE) + +#define S_STATIC_M_PLL_RANGEPRE 10 +#define 
M_STATIC_M_PLL_RANGEPRE 0x3U +#define V_STATIC_M_PLL_RANGEPRE(x) ((x) << S_STATIC_M_PLL_RANGEPRE) +#define G_STATIC_M_PLL_RANGEPRE(x) (((x) >> S_STATIC_M_PLL_RANGEPRE) & M_STATIC_M_PLL_RANGEPRE) + +#define S_T6_STATIC_M_PLL_RANGEB 5 +#define M_T6_STATIC_M_PLL_RANGEB 0x1fU +#define V_T6_STATIC_M_PLL_RANGEB(x) ((x) << S_T6_STATIC_M_PLL_RANGEB) +#define G_T6_STATIC_M_PLL_RANGEB(x) (((x) >> S_T6_STATIC_M_PLL_RANGEB) & M_T6_STATIC_M_PLL_RANGEB) + +#define S_T6_STATIC_M_PLL_RANGEA 0 +#define M_T6_STATIC_M_PLL_RANGEA 0x1fU +#define V_T6_STATIC_M_PLL_RANGEA(x) ((x) << S_T6_STATIC_M_PLL_RANGEA) +#define G_T6_STATIC_M_PLL_RANGEA(x) (((x) >> S_T6_STATIC_M_PLL_RANGEA) & M_T6_STATIC_M_PLL_RANGEA) + #define A_DBG_T5_STATIC_M_PLL_CONF4 0x60c4 +#define A_DBG_STATIC_M_PLL_CONF4 0x60c4 #define A_DBG_T5_STATIC_M_PLL_CONF5 0x60c8 #define S_T5_STATIC_M_PLL_VCVTUNE 24 @@ -8287,6 +12810,31 @@ #define V_T5_STATIC_M_PLL_MULT(x) ((x) << S_T5_STATIC_M_PLL_MULT) #define G_T5_STATIC_M_PLL_MULT(x) (((x) >> S_T5_STATIC_M_PLL_MULT) & M_T5_STATIC_M_PLL_MULT) +#define A_DBG_STATIC_M_PLL_CONF5 0x60c8 + +#define S_STATIC_M_PLL_VCVTUNE 24 +#define M_STATIC_M_PLL_VCVTUNE 0x7U +#define V_STATIC_M_PLL_VCVTUNE(x) ((x) << S_STATIC_M_PLL_VCVTUNE) +#define G_STATIC_M_PLL_VCVTUNE(x) (((x) >> S_STATIC_M_PLL_VCVTUNE) & M_STATIC_M_PLL_VCVTUNE) + +#define S_T6_STATIC_M_PLL_RESET 23 +#define V_T6_STATIC_M_PLL_RESET(x) ((x) << S_T6_STATIC_M_PLL_RESET) +#define F_T6_STATIC_M_PLL_RESET V_T6_STATIC_M_PLL_RESET(1U) + +#define S_STATIC_MPLL_REFCLK_SEL 22 +#define V_STATIC_MPLL_REFCLK_SEL(x) ((x) << S_STATIC_MPLL_REFCLK_SEL) +#define F_STATIC_MPLL_REFCLK_SEL V_STATIC_MPLL_REFCLK_SEL(1U) + +#define S_STATIC_M_PLL_LFTUNE_32_40 13 +#define M_STATIC_M_PLL_LFTUNE_32_40 0x1ffU +#define V_STATIC_M_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_M_PLL_LFTUNE_32_40) +#define G_STATIC_M_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_M_PLL_LFTUNE_32_40) & M_STATIC_M_PLL_LFTUNE_32_40) + +#define S_T6_STATIC_M_PLL_MULT 0 +#define M_T6_STATIC_M_PLL_MULT 
0xffU +#define V_T6_STATIC_M_PLL_MULT(x) ((x) << S_T6_STATIC_M_PLL_MULT) +#define G_T6_STATIC_M_PLL_MULT(x) (((x) >> S_T6_STATIC_M_PLL_MULT) & M_T6_STATIC_M_PLL_MULT) + #define A_DBG_T5_STATIC_M_PLL_CONF6 0x60cc #define S_T5_STATIC_PHY0RECRST_ 5 @@ -8313,6 +12861,58 @@ #define V_T5_STATIC_SWMC1CFGRST_(x) ((x) << S_T5_STATIC_SWMC1CFGRST_) #define F_T5_STATIC_SWMC1CFGRST_ V_T5_STATIC_SWMC1CFGRST_(1U) +#define A_DBG_STATIC_M_PLL_CONF6 0x60cc + +#define S_STATIC_M_PLL_DIVCHANGE 30 +#define V_STATIC_M_PLL_DIVCHANGE(x) ((x) << S_STATIC_M_PLL_DIVCHANGE) +#define F_STATIC_M_PLL_DIVCHANGE V_STATIC_M_PLL_DIVCHANGE(1U) + +#define S_STATIC_M_PLL_FRAMESTOP 29 +#define V_STATIC_M_PLL_FRAMESTOP(x) ((x) << S_STATIC_M_PLL_FRAMESTOP) +#define F_STATIC_M_PLL_FRAMESTOP V_STATIC_M_PLL_FRAMESTOP(1U) + +#define S_STATIC_M_PLL_FASTSTOP 28 +#define V_STATIC_M_PLL_FASTSTOP(x) ((x) << S_STATIC_M_PLL_FASTSTOP) +#define F_STATIC_M_PLL_FASTSTOP V_STATIC_M_PLL_FASTSTOP(1U) + +#define S_STATIC_M_PLL_FFBYPASS 27 +#define V_STATIC_M_PLL_FFBYPASS(x) ((x) << S_STATIC_M_PLL_FFBYPASS) +#define F_STATIC_M_PLL_FFBYPASS V_STATIC_M_PLL_FFBYPASS(1U) + +#define S_STATIC_M_PLL_STARTUP 25 +#define M_STATIC_M_PLL_STARTUP 0x3U +#define V_STATIC_M_PLL_STARTUP(x) ((x) << S_STATIC_M_PLL_STARTUP) +#define G_STATIC_M_PLL_STARTUP(x) (((x) >> S_STATIC_M_PLL_STARTUP) & M_STATIC_M_PLL_STARTUP) + +#define S_STATIC_M_PLL_VREGTUNE 6 +#define M_STATIC_M_PLL_VREGTUNE 0x7ffffU +#define V_STATIC_M_PLL_VREGTUNE(x) ((x) << S_STATIC_M_PLL_VREGTUNE) +#define G_STATIC_M_PLL_VREGTUNE(x) (((x) >> S_STATIC_M_PLL_VREGTUNE) & M_STATIC_M_PLL_VREGTUNE) + +#define S_STATIC_PHY0RECRST_ 5 +#define V_STATIC_PHY0RECRST_(x) ((x) << S_STATIC_PHY0RECRST_) +#define F_STATIC_PHY0RECRST_ V_STATIC_PHY0RECRST_(1U) + +#define S_STATIC_PHY1RECRST_ 4 +#define V_STATIC_PHY1RECRST_(x) ((x) << S_STATIC_PHY1RECRST_) +#define F_STATIC_PHY1RECRST_ V_STATIC_PHY1RECRST_(1U) + +#define S_STATIC_SWMC0RST_ 3 +#define V_STATIC_SWMC0RST_(x) ((x) << S_STATIC_SWMC0RST_) 
+#define F_STATIC_SWMC0RST_ V_STATIC_SWMC0RST_(1U) + +#define S_STATIC_SWMC0CFGRST_ 2 +#define V_STATIC_SWMC0CFGRST_(x) ((x) << S_STATIC_SWMC0CFGRST_) +#define F_STATIC_SWMC0CFGRST_ V_STATIC_SWMC0CFGRST_(1U) + +#define S_STATIC_SWMC1RST_ 1 +#define V_STATIC_SWMC1RST_(x) ((x) << S_STATIC_SWMC1RST_) +#define F_STATIC_SWMC1RST_ V_STATIC_SWMC1RST_(1U) + +#define S_STATIC_SWMC1CFGRST_ 0 +#define V_STATIC_SWMC1CFGRST_(x) ((x) << S_STATIC_SWMC1CFGRST_) +#define F_STATIC_SWMC1CFGRST_ V_STATIC_SWMC1CFGRST_(1U) + #define A_DBG_T5_STATIC_C_PLL_CONF1 0x60d0 #define S_T5_STATIC_C_PLL_MULTFRAC 8 @@ -8325,6 +12925,18 @@ #define V_T5_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_C_PLL_FFSLEWRATE) #define G_T5_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_C_PLL_FFSLEWRATE) & M_T5_STATIC_C_PLL_FFSLEWRATE) +#define A_DBG_STATIC_C_PLL_CONF1 0x60d0 + +#define S_STATIC_C_PLL_MULTFRAC 8 +#define M_STATIC_C_PLL_MULTFRAC 0xffffffU +#define V_STATIC_C_PLL_MULTFRAC(x) ((x) << S_STATIC_C_PLL_MULTFRAC) +#define G_STATIC_C_PLL_MULTFRAC(x) (((x) >> S_STATIC_C_PLL_MULTFRAC) & M_STATIC_C_PLL_MULTFRAC) + +#define S_STATIC_C_PLL_FFSLEWRATE 0 +#define M_STATIC_C_PLL_FFSLEWRATE 0xffU +#define V_STATIC_C_PLL_FFSLEWRATE(x) ((x) << S_STATIC_C_PLL_FFSLEWRATE) +#define G_STATIC_C_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_C_PLL_FFSLEWRATE) & M_STATIC_C_PLL_FFSLEWRATE) + #define A_DBG_T5_STATIC_C_PLL_CONF2 0x60d4 #define S_T5_STATIC_C_PLL_DCO_BYPASS 23 @@ -8361,6 +12973,52 @@ #define V_T5_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_C_PLL_LOCKTUNE) #define G_T5_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_C_PLL_LOCKTUNE) & M_T5_STATIC_C_PLL_LOCKTUNE) +#define A_DBG_STATIC_C_PLL_CONF2 0x60d4 + +#define S_T6_STATIC_C_PLL_PREDIV 26 +#define M_T6_STATIC_C_PLL_PREDIV 0x3fU +#define V_T6_STATIC_C_PLL_PREDIV(x) ((x) << S_T6_STATIC_C_PLL_PREDIV) +#define G_T6_STATIC_C_PLL_PREDIV(x) (((x) >> S_T6_STATIC_C_PLL_PREDIV) & M_T6_STATIC_C_PLL_PREDIV) + +#define S_STATIC_C_PLL_STARTUP 24 +#define M_STATIC_C_PLL_STARTUP 0x3U 
+#define V_STATIC_C_PLL_STARTUP(x) ((x) << S_STATIC_C_PLL_STARTUP) +#define G_STATIC_C_PLL_STARTUP(x) (((x) >> S_STATIC_C_PLL_STARTUP) & M_STATIC_C_PLL_STARTUP) + +#define S_STATIC_C_PLL_DCO_BYPASS 23 +#define V_STATIC_C_PLL_DCO_BYPASS(x) ((x) << S_STATIC_C_PLL_DCO_BYPASS) +#define F_STATIC_C_PLL_DCO_BYPASS V_STATIC_C_PLL_DCO_BYPASS(1U) + +#define S_STATIC_C_PLL_SDORDER 21 +#define M_STATIC_C_PLL_SDORDER 0x3U +#define V_STATIC_C_PLL_SDORDER(x) ((x) << S_STATIC_C_PLL_SDORDER) +#define G_STATIC_C_PLL_SDORDER(x) (((x) >> S_STATIC_C_PLL_SDORDER) & M_STATIC_C_PLL_SDORDER) + +#define S_STATIC_C_PLL_DIVCHANGE 20 +#define V_STATIC_C_PLL_DIVCHANGE(x) ((x) << S_STATIC_C_PLL_DIVCHANGE) +#define F_STATIC_C_PLL_DIVCHANGE V_STATIC_C_PLL_DIVCHANGE(1U) + +#define S_STATIC_C_PLL_STOPCLKB 19 +#define V_STATIC_C_PLL_STOPCLKB(x) ((x) << S_STATIC_C_PLL_STOPCLKB) +#define F_STATIC_C_PLL_STOPCLKB V_STATIC_C_PLL_STOPCLKB(1U) + +#define S_STATIC_C_PLL_STOPCLKA 18 +#define V_STATIC_C_PLL_STOPCLKA(x) ((x) << S_STATIC_C_PLL_STOPCLKA) +#define F_STATIC_C_PLL_STOPCLKA V_STATIC_C_PLL_STOPCLKA(1U) + +#define S_T6_STATIC_C_PLL_SLEEP 17 +#define V_T6_STATIC_C_PLL_SLEEP(x) ((x) << S_T6_STATIC_C_PLL_SLEEP) +#define F_T6_STATIC_C_PLL_SLEEP V_T6_STATIC_C_PLL_SLEEP(1U) + +#define S_T6_STATIC_C_PLL_BYPASS 16 +#define V_T6_STATIC_C_PLL_BYPASS(x) ((x) << S_T6_STATIC_C_PLL_BYPASS) +#define F_T6_STATIC_C_PLL_BYPASS V_T6_STATIC_C_PLL_BYPASS(1U) + +#define S_STATIC_C_PLL_LOCKTUNE 0 +#define M_STATIC_C_PLL_LOCKTUNE 0x1fU +#define V_STATIC_C_PLL_LOCKTUNE(x) ((x) << S_STATIC_C_PLL_LOCKTUNE) +#define G_STATIC_C_PLL_LOCKTUNE(x) (((x) >> S_STATIC_C_PLL_LOCKTUNE) & M_STATIC_C_PLL_LOCKTUNE) + #define A_DBG_T5_STATIC_C_PLL_CONF3 0x60d8 #define S_T5_STATIC_C_PLL_MULTPRE 30 @@ -8393,7 +13051,39 @@ #define V_T5_STATIC_C_PLL_RANGEA(x) ((x) << S_T5_STATIC_C_PLL_RANGEA) #define G_T5_STATIC_C_PLL_RANGEA(x) (((x) >> S_T5_STATIC_C_PLL_RANGEA) & M_T5_STATIC_C_PLL_RANGEA) +#define A_DBG_STATIC_C_PLL_CONF3 0x60d8 + +#define 
S_STATIC_C_PLL_MULTPRE 30 +#define M_STATIC_C_PLL_MULTPRE 0x3U +#define V_STATIC_C_PLL_MULTPRE(x) ((x) << S_STATIC_C_PLL_MULTPRE) +#define G_STATIC_C_PLL_MULTPRE(x) (((x) >> S_STATIC_C_PLL_MULTPRE) & M_STATIC_C_PLL_MULTPRE) + +#define S_STATIC_C_PLL_LOCKSEL 28 +#define V_STATIC_C_PLL_LOCKSEL(x) ((x) << S_STATIC_C_PLL_LOCKSEL) +#define F_STATIC_C_PLL_LOCKSEL V_STATIC_C_PLL_LOCKSEL(1U) + +#define S_STATIC_C_PLL_FFTUNE 12 +#define M_STATIC_C_PLL_FFTUNE 0xffffU +#define V_STATIC_C_PLL_FFTUNE(x) ((x) << S_STATIC_C_PLL_FFTUNE) +#define G_STATIC_C_PLL_FFTUNE(x) (((x) >> S_STATIC_C_PLL_FFTUNE) & M_STATIC_C_PLL_FFTUNE) + +#define S_STATIC_C_PLL_RANGEPRE 10 +#define M_STATIC_C_PLL_RANGEPRE 0x3U +#define V_STATIC_C_PLL_RANGEPRE(x) ((x) << S_STATIC_C_PLL_RANGEPRE) +#define G_STATIC_C_PLL_RANGEPRE(x) (((x) >> S_STATIC_C_PLL_RANGEPRE) & M_STATIC_C_PLL_RANGEPRE) + +#define S_T6_STATIC_C_PLL_RANGEB 5 +#define M_T6_STATIC_C_PLL_RANGEB 0x1fU +#define V_T6_STATIC_C_PLL_RANGEB(x) ((x) << S_T6_STATIC_C_PLL_RANGEB) +#define G_T6_STATIC_C_PLL_RANGEB(x) (((x) >> S_T6_STATIC_C_PLL_RANGEB) & M_T6_STATIC_C_PLL_RANGEB) + +#define S_T6_STATIC_C_PLL_RANGEA 0 +#define M_T6_STATIC_C_PLL_RANGEA 0x1fU +#define V_T6_STATIC_C_PLL_RANGEA(x) ((x) << S_T6_STATIC_C_PLL_RANGEA) +#define G_T6_STATIC_C_PLL_RANGEA(x) (((x) >> S_T6_STATIC_C_PLL_RANGEA) & M_T6_STATIC_C_PLL_RANGEA) + #define A_DBG_T5_STATIC_C_PLL_CONF4 0x60dc +#define A_DBG_STATIC_C_PLL_CONF4 0x60dc #define A_DBG_T5_STATIC_C_PLL_CONF5 0x60e0 #define S_T5_STATIC_C_PLL_VCVTUNE 22 @@ -8416,6 +13106,40 @@ #define V_T5_STATIC_C_PLL_MULT(x) ((x) << S_T5_STATIC_C_PLL_MULT) #define G_T5_STATIC_C_PLL_MULT(x) (((x) >> S_T5_STATIC_C_PLL_MULT) & M_T5_STATIC_C_PLL_MULT) +#define A_DBG_STATIC_C_PLL_CONF5 0x60e0 + +#define S_STATIC_C_PLL_FFBYPASS 27 +#define V_STATIC_C_PLL_FFBYPASS(x) ((x) << S_STATIC_C_PLL_FFBYPASS) +#define F_STATIC_C_PLL_FFBYPASS V_STATIC_C_PLL_FFBYPASS(1U) + +#define S_STATIC_C_PLL_FASTSTOP 26 +#define V_STATIC_C_PLL_FASTSTOP(x) ((x) << 
S_STATIC_C_PLL_FASTSTOP) +#define F_STATIC_C_PLL_FASTSTOP V_STATIC_C_PLL_FASTSTOP(1U) + +#define S_STATIC_C_PLL_FRAMESTOP 25 +#define V_STATIC_C_PLL_FRAMESTOP(x) ((x) << S_STATIC_C_PLL_FRAMESTOP) +#define F_STATIC_C_PLL_FRAMESTOP V_STATIC_C_PLL_FRAMESTOP(1U) + +#define S_STATIC_C_PLL_VCVTUNE 22 +#define M_STATIC_C_PLL_VCVTUNE 0x7U +#define V_STATIC_C_PLL_VCVTUNE(x) ((x) << S_STATIC_C_PLL_VCVTUNE) +#define G_STATIC_C_PLL_VCVTUNE(x) (((x) >> S_STATIC_C_PLL_VCVTUNE) & M_STATIC_C_PLL_VCVTUNE) + +#define S_STATIC_C_PLL_LFTUNE_32_40 13 +#define M_STATIC_C_PLL_LFTUNE_32_40 0x1ffU +#define V_STATIC_C_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_C_PLL_LFTUNE_32_40) +#define G_STATIC_C_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_C_PLL_LFTUNE_32_40) & M_STATIC_C_PLL_LFTUNE_32_40) + +#define S_STATIC_C_PLL_PREDIV_CNF5 8 +#define M_STATIC_C_PLL_PREDIV_CNF5 0x1fU +#define V_STATIC_C_PLL_PREDIV_CNF5(x) ((x) << S_STATIC_C_PLL_PREDIV_CNF5) +#define G_STATIC_C_PLL_PREDIV_CNF5(x) (((x) >> S_STATIC_C_PLL_PREDIV_CNF5) & M_STATIC_C_PLL_PREDIV_CNF5) + +#define S_T6_STATIC_C_PLL_MULT 0 +#define M_T6_STATIC_C_PLL_MULT 0xffU +#define V_T6_STATIC_C_PLL_MULT(x) ((x) << S_T6_STATIC_C_PLL_MULT) +#define G_T6_STATIC_C_PLL_MULT(x) (((x) >> S_T6_STATIC_C_PLL_MULT) & M_T6_STATIC_C_PLL_MULT) + #define A_DBG_T5_STATIC_U_PLL_CONF1 0x60e4 #define S_T5_STATIC_U_PLL_MULTFRAC 8 @@ -8428,6 +13152,18 @@ #define V_T5_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_T5_STATIC_U_PLL_FFSLEWRATE) #define G_T5_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_T5_STATIC_U_PLL_FFSLEWRATE) & M_T5_STATIC_U_PLL_FFSLEWRATE) +#define A_DBG_STATIC_U_PLL_CONF1 0x60e4 + +#define S_STATIC_U_PLL_MULTFRAC 8 +#define M_STATIC_U_PLL_MULTFRAC 0xffffffU +#define V_STATIC_U_PLL_MULTFRAC(x) ((x) << S_STATIC_U_PLL_MULTFRAC) +#define G_STATIC_U_PLL_MULTFRAC(x) (((x) >> S_STATIC_U_PLL_MULTFRAC) & M_STATIC_U_PLL_MULTFRAC) + +#define S_STATIC_U_PLL_FFSLEWRATE 0 +#define M_STATIC_U_PLL_FFSLEWRATE 0xffU +#define V_STATIC_U_PLL_FFSLEWRATE(x) ((x) << S_STATIC_U_PLL_FFSLEWRATE) 
+#define G_STATIC_U_PLL_FFSLEWRATE(x) (((x) >> S_STATIC_U_PLL_FFSLEWRATE) & M_STATIC_U_PLL_FFSLEWRATE) + #define A_DBG_T5_STATIC_U_PLL_CONF2 0x60e8 #define S_T5_STATIC_U_PLL_DCO_BYPASS 23 @@ -8464,6 +13200,52 @@ #define V_T5_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_T5_STATIC_U_PLL_LOCKTUNE) #define G_T5_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_T5_STATIC_U_PLL_LOCKTUNE) & M_T5_STATIC_U_PLL_LOCKTUNE) +#define A_DBG_STATIC_U_PLL_CONF2 0x60e8 + +#define S_T6_STATIC_U_PLL_PREDIV 26 +#define M_T6_STATIC_U_PLL_PREDIV 0x3fU +#define V_T6_STATIC_U_PLL_PREDIV(x) ((x) << S_T6_STATIC_U_PLL_PREDIV) +#define G_T6_STATIC_U_PLL_PREDIV(x) (((x) >> S_T6_STATIC_U_PLL_PREDIV) & M_T6_STATIC_U_PLL_PREDIV) + +#define S_STATIC_U_PLL_STARTUP 24 +#define M_STATIC_U_PLL_STARTUP 0x3U +#define V_STATIC_U_PLL_STARTUP(x) ((x) << S_STATIC_U_PLL_STARTUP) +#define G_STATIC_U_PLL_STARTUP(x) (((x) >> S_STATIC_U_PLL_STARTUP) & M_STATIC_U_PLL_STARTUP) + +#define S_STATIC_U_PLL_DCO_BYPASS 23 +#define V_STATIC_U_PLL_DCO_BYPASS(x) ((x) << S_STATIC_U_PLL_DCO_BYPASS) +#define F_STATIC_U_PLL_DCO_BYPASS V_STATIC_U_PLL_DCO_BYPASS(1U) + +#define S_STATIC_U_PLL_SDORDER 21 +#define M_STATIC_U_PLL_SDORDER 0x3U +#define V_STATIC_U_PLL_SDORDER(x) ((x) << S_STATIC_U_PLL_SDORDER) +#define G_STATIC_U_PLL_SDORDER(x) (((x) >> S_STATIC_U_PLL_SDORDER) & M_STATIC_U_PLL_SDORDER) + +#define S_STATIC_U_PLL_DIVCHANGE 20 +#define V_STATIC_U_PLL_DIVCHANGE(x) ((x) << S_STATIC_U_PLL_DIVCHANGE) +#define F_STATIC_U_PLL_DIVCHANGE V_STATIC_U_PLL_DIVCHANGE(1U) + +#define S_STATIC_U_PLL_STOPCLKB 19 +#define V_STATIC_U_PLL_STOPCLKB(x) ((x) << S_STATIC_U_PLL_STOPCLKB) +#define F_STATIC_U_PLL_STOPCLKB V_STATIC_U_PLL_STOPCLKB(1U) + +#define S_STATIC_U_PLL_STOPCLKA 18 +#define V_STATIC_U_PLL_STOPCLKA(x) ((x) << S_STATIC_U_PLL_STOPCLKA) +#define F_STATIC_U_PLL_STOPCLKA V_STATIC_U_PLL_STOPCLKA(1U) + +#define S_T6_STATIC_U_PLL_SLEEP 17 +#define V_T6_STATIC_U_PLL_SLEEP(x) ((x) << S_T6_STATIC_U_PLL_SLEEP) +#define F_T6_STATIC_U_PLL_SLEEP 
V_T6_STATIC_U_PLL_SLEEP(1U) + +#define S_T6_STATIC_U_PLL_BYPASS 16 +#define V_T6_STATIC_U_PLL_BYPASS(x) ((x) << S_T6_STATIC_U_PLL_BYPASS) +#define F_T6_STATIC_U_PLL_BYPASS V_T6_STATIC_U_PLL_BYPASS(1U) + +#define S_STATIC_U_PLL_LOCKTUNE 0 +#define M_STATIC_U_PLL_LOCKTUNE 0x1fU +#define V_STATIC_U_PLL_LOCKTUNE(x) ((x) << S_STATIC_U_PLL_LOCKTUNE) +#define G_STATIC_U_PLL_LOCKTUNE(x) (((x) >> S_STATIC_U_PLL_LOCKTUNE) & M_STATIC_U_PLL_LOCKTUNE) + #define A_DBG_T5_STATIC_U_PLL_CONF3 0x60ec #define S_T5_STATIC_U_PLL_MULTPRE 30 @@ -8496,7 +13278,39 @@ #define V_T5_STATIC_U_PLL_RANGEA(x) ((x) << S_T5_STATIC_U_PLL_RANGEA) #define G_T5_STATIC_U_PLL_RANGEA(x) (((x) >> S_T5_STATIC_U_PLL_RANGEA) & M_T5_STATIC_U_PLL_RANGEA) +#define A_DBG_STATIC_U_PLL_CONF3 0x60ec + +#define S_STATIC_U_PLL_MULTPRE 30 +#define M_STATIC_U_PLL_MULTPRE 0x3U +#define V_STATIC_U_PLL_MULTPRE(x) ((x) << S_STATIC_U_PLL_MULTPRE) +#define G_STATIC_U_PLL_MULTPRE(x) (((x) >> S_STATIC_U_PLL_MULTPRE) & M_STATIC_U_PLL_MULTPRE) + +#define S_STATIC_U_PLL_LOCKSEL 28 +#define V_STATIC_U_PLL_LOCKSEL(x) ((x) << S_STATIC_U_PLL_LOCKSEL) +#define F_STATIC_U_PLL_LOCKSEL V_STATIC_U_PLL_LOCKSEL(1U) + +#define S_STATIC_U_PLL_FFTUNE 12 +#define M_STATIC_U_PLL_FFTUNE 0xffffU +#define V_STATIC_U_PLL_FFTUNE(x) ((x) << S_STATIC_U_PLL_FFTUNE) +#define G_STATIC_U_PLL_FFTUNE(x) (((x) >> S_STATIC_U_PLL_FFTUNE) & M_STATIC_U_PLL_FFTUNE) + +#define S_STATIC_U_PLL_RANGEPRE 10 +#define M_STATIC_U_PLL_RANGEPRE 0x3U +#define V_STATIC_U_PLL_RANGEPRE(x) ((x) << S_STATIC_U_PLL_RANGEPRE) +#define G_STATIC_U_PLL_RANGEPRE(x) (((x) >> S_STATIC_U_PLL_RANGEPRE) & M_STATIC_U_PLL_RANGEPRE) + +#define S_T6_STATIC_U_PLL_RANGEB 5 +#define M_T6_STATIC_U_PLL_RANGEB 0x1fU +#define V_T6_STATIC_U_PLL_RANGEB(x) ((x) << S_T6_STATIC_U_PLL_RANGEB) +#define G_T6_STATIC_U_PLL_RANGEB(x) (((x) >> S_T6_STATIC_U_PLL_RANGEB) & M_T6_STATIC_U_PLL_RANGEB) + +#define S_T6_STATIC_U_PLL_RANGEA 0 +#define M_T6_STATIC_U_PLL_RANGEA 0x1fU +#define V_T6_STATIC_U_PLL_RANGEA(x) ((x) 
<< S_T6_STATIC_U_PLL_RANGEA) +#define G_T6_STATIC_U_PLL_RANGEA(x) (((x) >> S_T6_STATIC_U_PLL_RANGEA) & M_T6_STATIC_U_PLL_RANGEA) + #define A_DBG_T5_STATIC_U_PLL_CONF4 0x60f0 +#define A_DBG_STATIC_U_PLL_CONF4 0x60f0 #define A_DBG_T5_STATIC_U_PLL_CONF5 0x60f4 #define S_T5_STATIC_U_PLL_VCVTUNE 22 @@ -8519,6 +13333,40 @@ #define V_T5_STATIC_U_PLL_MULT(x) ((x) << S_T5_STATIC_U_PLL_MULT) #define G_T5_STATIC_U_PLL_MULT(x) (((x) >> S_T5_STATIC_U_PLL_MULT) & M_T5_STATIC_U_PLL_MULT) +#define A_DBG_STATIC_U_PLL_CONF5 0x60f4 + +#define S_STATIC_U_PLL_FFBYPASS 27 +#define V_STATIC_U_PLL_FFBYPASS(x) ((x) << S_STATIC_U_PLL_FFBYPASS) +#define F_STATIC_U_PLL_FFBYPASS V_STATIC_U_PLL_FFBYPASS(1U) + +#define S_STATIC_U_PLL_FASTSTOP 26 +#define V_STATIC_U_PLL_FASTSTOP(x) ((x) << S_STATIC_U_PLL_FASTSTOP) +#define F_STATIC_U_PLL_FASTSTOP V_STATIC_U_PLL_FASTSTOP(1U) + +#define S_STATIC_U_PLL_FRAMESTOP 25 +#define V_STATIC_U_PLL_FRAMESTOP(x) ((x) << S_STATIC_U_PLL_FRAMESTOP) +#define F_STATIC_U_PLL_FRAMESTOP V_STATIC_U_PLL_FRAMESTOP(1U) + +#define S_STATIC_U_PLL_VCVTUNE 22 +#define M_STATIC_U_PLL_VCVTUNE 0x7U +#define V_STATIC_U_PLL_VCVTUNE(x) ((x) << S_STATIC_U_PLL_VCVTUNE) +#define G_STATIC_U_PLL_VCVTUNE(x) (((x) >> S_STATIC_U_PLL_VCVTUNE) & M_STATIC_U_PLL_VCVTUNE) + +#define S_STATIC_U_PLL_LFTUNE_32_40 13 +#define M_STATIC_U_PLL_LFTUNE_32_40 0x1ffU +#define V_STATIC_U_PLL_LFTUNE_32_40(x) ((x) << S_STATIC_U_PLL_LFTUNE_32_40) +#define G_STATIC_U_PLL_LFTUNE_32_40(x) (((x) >> S_STATIC_U_PLL_LFTUNE_32_40) & M_STATIC_U_PLL_LFTUNE_32_40) + +#define S_STATIC_U_PLL_PREDIV_CNF5 8 +#define M_STATIC_U_PLL_PREDIV_CNF5 0x1fU +#define V_STATIC_U_PLL_PREDIV_CNF5(x) ((x) << S_STATIC_U_PLL_PREDIV_CNF5) +#define G_STATIC_U_PLL_PREDIV_CNF5(x) (((x) >> S_STATIC_U_PLL_PREDIV_CNF5) & M_STATIC_U_PLL_PREDIV_CNF5) + +#define S_T6_STATIC_U_PLL_MULT 0 +#define M_T6_STATIC_U_PLL_MULT 0xffU +#define V_T6_STATIC_U_PLL_MULT(x) ((x) << S_T6_STATIC_U_PLL_MULT) +#define G_T6_STATIC_U_PLL_MULT(x) (((x) >> 
S_T6_STATIC_U_PLL_MULT) & M_T6_STATIC_U_PLL_MULT) + #define A_DBG_T5_STATIC_KR_PLL_CONF1 0x60f8 #define S_T5_STATIC_KR_PLL_BYPASS 30 @@ -8580,6 +13428,67 @@ #define V_T5_STATIC_KR_PLL_N1(x) ((x) << S_T5_STATIC_KR_PLL_N1) #define G_T5_STATIC_KR_PLL_N1(x) (((x) >> S_T5_STATIC_KR_PLL_N1) & M_T5_STATIC_KR_PLL_N1) +#define A_DBG_STATIC_KR_PLL_CONF1 0x60f8 + +#define S_T6_STATIC_KR_PLL_BYPASS 30 +#define V_T6_STATIC_KR_PLL_BYPASS(x) ((x) << S_T6_STATIC_KR_PLL_BYPASS) +#define F_T6_STATIC_KR_PLL_BYPASS V_T6_STATIC_KR_PLL_BYPASS(1U) + +#define S_STATIC_KR_PLL_VBOOSTDIV 27 +#define M_STATIC_KR_PLL_VBOOSTDIV 0x7U +#define V_STATIC_KR_PLL_VBOOSTDIV(x) ((x) << S_STATIC_KR_PLL_VBOOSTDIV) +#define G_STATIC_KR_PLL_VBOOSTDIV(x) (((x) >> S_STATIC_KR_PLL_VBOOSTDIV) & M_STATIC_KR_PLL_VBOOSTDIV) + +#define S_STATIC_KR_PLL_CPISEL 24 +#define M_STATIC_KR_PLL_CPISEL 0x7U +#define V_STATIC_KR_PLL_CPISEL(x) ((x) << S_STATIC_KR_PLL_CPISEL) +#define G_STATIC_KR_PLL_CPISEL(x) (((x) >> S_STATIC_KR_PLL_CPISEL) & M_STATIC_KR_PLL_CPISEL) + +#define S_STATIC_KR_PLL_CCALMETHOD 23 +#define V_STATIC_KR_PLL_CCALMETHOD(x) ((x) << S_STATIC_KR_PLL_CCALMETHOD) +#define F_STATIC_KR_PLL_CCALMETHOD V_STATIC_KR_PLL_CCALMETHOD(1U) + +#define S_STATIC_KR_PLL_CCALLOAD 22 +#define V_STATIC_KR_PLL_CCALLOAD(x) ((x) << S_STATIC_KR_PLL_CCALLOAD) +#define F_STATIC_KR_PLL_CCALLOAD V_STATIC_KR_PLL_CCALLOAD(1U) + +#define S_STATIC_KR_PLL_CCALFMIN 21 +#define V_STATIC_KR_PLL_CCALFMIN(x) ((x) << S_STATIC_KR_PLL_CCALFMIN) +#define F_STATIC_KR_PLL_CCALFMIN V_STATIC_KR_PLL_CCALFMIN(1U) + +#define S_STATIC_KR_PLL_CCALFMAX 20 +#define V_STATIC_KR_PLL_CCALFMAX(x) ((x) << S_STATIC_KR_PLL_CCALFMAX) +#define F_STATIC_KR_PLL_CCALFMAX V_STATIC_KR_PLL_CCALFMAX(1U) + +#define S_STATIC_KR_PLL_CCALCVHOLD 19 +#define V_STATIC_KR_PLL_CCALCVHOLD(x) ((x) << S_STATIC_KR_PLL_CCALCVHOLD) +#define F_STATIC_KR_PLL_CCALCVHOLD V_STATIC_KR_PLL_CCALCVHOLD(1U) + +#define S_STATIC_KR_PLL_CCALBANDSEL 15 +#define M_STATIC_KR_PLL_CCALBANDSEL 0xfU +#define 
V_STATIC_KR_PLL_CCALBANDSEL(x) ((x) << S_STATIC_KR_PLL_CCALBANDSEL) +#define G_STATIC_KR_PLL_CCALBANDSEL(x) (((x) >> S_STATIC_KR_PLL_CCALBANDSEL) & M_STATIC_KR_PLL_CCALBANDSEL) + +#define S_STATIC_KR_PLL_BGOFFSET 11 +#define M_STATIC_KR_PLL_BGOFFSET 0xfU +#define V_STATIC_KR_PLL_BGOFFSET(x) ((x) << S_STATIC_KR_PLL_BGOFFSET) +#define G_STATIC_KR_PLL_BGOFFSET(x) (((x) >> S_STATIC_KR_PLL_BGOFFSET) & M_STATIC_KR_PLL_BGOFFSET) + +#define S_T6_STATIC_KR_PLL_P 8 +#define M_T6_STATIC_KR_PLL_P 0x7U +#define V_T6_STATIC_KR_PLL_P(x) ((x) << S_T6_STATIC_KR_PLL_P) +#define G_T6_STATIC_KR_PLL_P(x) (((x) >> S_T6_STATIC_KR_PLL_P) & M_T6_STATIC_KR_PLL_P) + +#define S_T6_STATIC_KR_PLL_N2 4 +#define M_T6_STATIC_KR_PLL_N2 0xfU +#define V_T6_STATIC_KR_PLL_N2(x) ((x) << S_T6_STATIC_KR_PLL_N2) +#define G_T6_STATIC_KR_PLL_N2(x) (((x) >> S_T6_STATIC_KR_PLL_N2) & M_T6_STATIC_KR_PLL_N2) + +#define S_T6_STATIC_KR_PLL_N1 0 +#define M_T6_STATIC_KR_PLL_N1 0xfU +#define V_T6_STATIC_KR_PLL_N1(x) ((x) << S_T6_STATIC_KR_PLL_N1) +#define G_T6_STATIC_KR_PLL_N1(x) (((x) >> S_T6_STATIC_KR_PLL_N1) & M_T6_STATIC_KR_PLL_N1) + #define A_DBG_T5_STATIC_KR_PLL_CONF2 0x60fc #define S_T5_STATIC_KR_PLL_M 11 @@ -8592,6 +13501,18 @@ #define V_T5_STATIC_KR_PLL_ANALOGTUNE(x) ((x) << S_T5_STATIC_KR_PLL_ANALOGTUNE) #define G_T5_STATIC_KR_PLL_ANALOGTUNE(x) (((x) >> S_T5_STATIC_KR_PLL_ANALOGTUNE) & M_T5_STATIC_KR_PLL_ANALOGTUNE) +#define A_DBG_STATIC_KR_PLL_CONF2 0x60fc + +#define S_T6_STATIC_KR_PLL_M 11 +#define M_T6_STATIC_KR_PLL_M 0x1ffU +#define V_T6_STATIC_KR_PLL_M(x) ((x) << S_T6_STATIC_KR_PLL_M) +#define G_T6_STATIC_KR_PLL_M(x) (((x) >> S_T6_STATIC_KR_PLL_M) & M_T6_STATIC_KR_PLL_M) + +#define S_STATIC_KR_PLL_ANALOGTUNE 0 +#define M_STATIC_KR_PLL_ANALOGTUNE 0x7ffU +#define V_STATIC_KR_PLL_ANALOGTUNE(x) ((x) << S_STATIC_KR_PLL_ANALOGTUNE) +#define G_STATIC_KR_PLL_ANALOGTUNE(x) (((x) >> S_STATIC_KR_PLL_ANALOGTUNE) & M_STATIC_KR_PLL_ANALOGTUNE) + #define A_DBG_PVT_REG_CALIBRATE_CTL 0x6100 #define S_HALT_CALIBRATE 1 @@ 
-8668,21 +13589,21 @@ #define V_GPIO19_CHG_DET(x) ((x) << S_GPIO19_CHG_DET) #define F_GPIO19_CHG_DET V_GPIO19_CHG_DET(1U) -#define S_GPIO16_IN 3 -#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN) -#define F_GPIO16_IN V_GPIO16_IN(1U) - -#define S_GPIO17_IN 2 -#define V_GPIO17_IN(x) ((x) << S_GPIO17_IN) -#define F_GPIO17_IN V_GPIO17_IN(1U) +#define S_GPIO19_IN 3 +#define V_GPIO19_IN(x) ((x) << S_GPIO19_IN) +#define F_GPIO19_IN V_GPIO19_IN(1U) -#define S_GPIO18_IN 1 +#define S_GPIO18_IN 2 #define V_GPIO18_IN(x) ((x) << S_GPIO18_IN) #define F_GPIO18_IN V_GPIO18_IN(1U) -#define S_GPIO19_IN 0 -#define V_GPIO19_IN(x) ((x) << S_GPIO19_IN) -#define F_GPIO19_IN V_GPIO19_IN(1U) +#define S_GPIO17_IN 1 +#define V_GPIO17_IN(x) ((x) << S_GPIO17_IN) +#define F_GPIO17_IN V_GPIO17_IN(1U) + +#define S_GPIO16_IN 0 +#define V_GPIO16_IN(x) ((x) << S_GPIO16_IN) +#define F_GPIO16_IN V_GPIO16_IN(1U) #define A_DBG_PVT_REG_LAST_MEASUREMENT 0x6108 @@ -8762,6 +13683,67 @@ #define V_T5_STATIC_KX_PLL_N1(x) ((x) << S_T5_STATIC_KX_PLL_N1) #define G_T5_STATIC_KX_PLL_N1(x) (((x) >> S_T5_STATIC_KX_PLL_N1) & M_T5_STATIC_KX_PLL_N1) +#define A_DBG_STATIC_KX_PLL_CONF1 0x6108 + +#define S_T6_STATIC_KX_PLL_BYPASS 30 +#define V_T6_STATIC_KX_PLL_BYPASS(x) ((x) << S_T6_STATIC_KX_PLL_BYPASS) +#define F_T6_STATIC_KX_PLL_BYPASS V_T6_STATIC_KX_PLL_BYPASS(1U) + +#define S_STATIC_KX_PLL_VBOOSTDIV 27 +#define M_STATIC_KX_PLL_VBOOSTDIV 0x7U +#define V_STATIC_KX_PLL_VBOOSTDIV(x) ((x) << S_STATIC_KX_PLL_VBOOSTDIV) +#define G_STATIC_KX_PLL_VBOOSTDIV(x) (((x) >> S_STATIC_KX_PLL_VBOOSTDIV) & M_STATIC_KX_PLL_VBOOSTDIV) + +#define S_STATIC_KX_PLL_CPISEL 24 +#define M_STATIC_KX_PLL_CPISEL 0x7U +#define V_STATIC_KX_PLL_CPISEL(x) ((x) << S_STATIC_KX_PLL_CPISEL) +#define G_STATIC_KX_PLL_CPISEL(x) (((x) >> S_STATIC_KX_PLL_CPISEL) & M_STATIC_KX_PLL_CPISEL) + +#define S_STATIC_KX_PLL_CCALMETHOD 23 +#define V_STATIC_KX_PLL_CCALMETHOD(x) ((x) << S_STATIC_KX_PLL_CCALMETHOD) +#define F_STATIC_KX_PLL_CCALMETHOD V_STATIC_KX_PLL_CCALMETHOD(1U) 
+ +#define S_STATIC_KX_PLL_CCALLOAD 22 +#define V_STATIC_KX_PLL_CCALLOAD(x) ((x) << S_STATIC_KX_PLL_CCALLOAD) +#define F_STATIC_KX_PLL_CCALLOAD V_STATIC_KX_PLL_CCALLOAD(1U) + +#define S_STATIC_KX_PLL_CCALFMIN 21 +#define V_STATIC_KX_PLL_CCALFMIN(x) ((x) << S_STATIC_KX_PLL_CCALFMIN) +#define F_STATIC_KX_PLL_CCALFMIN V_STATIC_KX_PLL_CCALFMIN(1U) + +#define S_STATIC_KX_PLL_CCALFMAX 20 +#define V_STATIC_KX_PLL_CCALFMAX(x) ((x) << S_STATIC_KX_PLL_CCALFMAX) +#define F_STATIC_KX_PLL_CCALFMAX V_STATIC_KX_PLL_CCALFMAX(1U) + +#define S_STATIC_KX_PLL_CCALCVHOLD 19 +#define V_STATIC_KX_PLL_CCALCVHOLD(x) ((x) << S_STATIC_KX_PLL_CCALCVHOLD) +#define F_STATIC_KX_PLL_CCALCVHOLD V_STATIC_KX_PLL_CCALCVHOLD(1U) + +#define S_STATIC_KX_PLL_CCALBANDSEL 15 +#define M_STATIC_KX_PLL_CCALBANDSEL 0xfU +#define V_STATIC_KX_PLL_CCALBANDSEL(x) ((x) << S_STATIC_KX_PLL_CCALBANDSEL) +#define G_STATIC_KX_PLL_CCALBANDSEL(x) (((x) >> S_STATIC_KX_PLL_CCALBANDSEL) & M_STATIC_KX_PLL_CCALBANDSEL) + +#define S_STATIC_KX_PLL_BGOFFSET 11 +#define M_STATIC_KX_PLL_BGOFFSET 0xfU +#define V_STATIC_KX_PLL_BGOFFSET(x) ((x) << S_STATIC_KX_PLL_BGOFFSET) +#define G_STATIC_KX_PLL_BGOFFSET(x) (((x) >> S_STATIC_KX_PLL_BGOFFSET) & M_STATIC_KX_PLL_BGOFFSET) + +#define S_T6_STATIC_KX_PLL_P 8 +#define M_T6_STATIC_KX_PLL_P 0x7U +#define V_T6_STATIC_KX_PLL_P(x) ((x) << S_T6_STATIC_KX_PLL_P) +#define G_T6_STATIC_KX_PLL_P(x) (((x) >> S_T6_STATIC_KX_PLL_P) & M_T6_STATIC_KX_PLL_P) + +#define S_T6_STATIC_KX_PLL_N2 4 +#define M_T6_STATIC_KX_PLL_N2 0xfU +#define V_T6_STATIC_KX_PLL_N2(x) ((x) << S_T6_STATIC_KX_PLL_N2) +#define G_T6_STATIC_KX_PLL_N2(x) (((x) >> S_T6_STATIC_KX_PLL_N2) & M_T6_STATIC_KX_PLL_N2) + +#define S_T6_STATIC_KX_PLL_N1 0 +#define M_T6_STATIC_KX_PLL_N1 0xfU +#define V_T6_STATIC_KX_PLL_N1(x) ((x) << S_T6_STATIC_KX_PLL_N1) +#define G_T6_STATIC_KX_PLL_N1(x) (((x) >> S_T6_STATIC_KX_PLL_N1) & M_T6_STATIC_KX_PLL_N1) + #define A_DBG_PVT_REG_DRVN 0x610c #define S_PVT_REG_DRVN_EN 8 @@ -8790,6 +13772,18 @@ #define 
V_T5_STATIC_KX_PLL_ANALOGTUNE(x) ((x) << S_T5_STATIC_KX_PLL_ANALOGTUNE) #define G_T5_STATIC_KX_PLL_ANALOGTUNE(x) (((x) >> S_T5_STATIC_KX_PLL_ANALOGTUNE) & M_T5_STATIC_KX_PLL_ANALOGTUNE) +#define A_DBG_STATIC_KX_PLL_CONF2 0x610c + +#define S_T6_STATIC_KX_PLL_M 11 +#define M_T6_STATIC_KX_PLL_M 0x1ffU +#define V_T6_STATIC_KX_PLL_M(x) ((x) << S_T6_STATIC_KX_PLL_M) +#define G_T6_STATIC_KX_PLL_M(x) (((x) >> S_T6_STATIC_KX_PLL_M) & M_T6_STATIC_KX_PLL_M) + +#define S_STATIC_KX_PLL_ANALOGTUNE 0 +#define M_STATIC_KX_PLL_ANALOGTUNE 0x7ffU +#define V_STATIC_KX_PLL_ANALOGTUNE(x) ((x) << S_STATIC_KX_PLL_ANALOGTUNE) +#define G_STATIC_KX_PLL_ANALOGTUNE(x) (((x) >> S_STATIC_KX_PLL_ANALOGTUNE) & M_STATIC_KX_PLL_ANALOGTUNE) + #define A_DBG_PVT_REG_DRVP 0x6110 #define S_PVT_REG_DRVP_EN 8 @@ -8830,6 +13824,7 @@ #define V_STATIC_C_DFS_ENABLE(x) ((x) << S_STATIC_C_DFS_ENABLE) #define F_STATIC_C_DFS_ENABLE V_STATIC_C_DFS_ENABLE(1U) +#define A_DBG_STATIC_C_DFS_CONF 0x6110 #define A_DBG_PVT_REG_TERMN 0x6114 #define S_PVT_REG_TERMN_EN 8 @@ -8870,6 +13865,7 @@ #define V_STATIC_U_DFS_ENABLE(x) ((x) << S_STATIC_U_DFS_ENABLE) #define F_STATIC_U_DFS_ENABLE V_STATIC_U_DFS_ENABLE(1U) +#define A_DBG_STATIC_U_DFS_CONF 0x6114 #define A_DBG_PVT_REG_TERMP 0x6118 #define S_PVT_REG_TERMP_EN 8 @@ -9251,6 +14247,70 @@ #define V_SAMPLE_WAIT_CLKS(x) ((x) << S_SAMPLE_WAIT_CLKS) #define G_SAMPLE_WAIT_CLKS(x) (((x) >> S_SAMPLE_WAIT_CLKS) & M_SAMPLE_WAIT_CLKS) +#define A_DBG_STATIC_U_PLL_CONF6 0x6150 + +#define S_STATIC_U_PLL_VREGTUNE 0 +#define M_STATIC_U_PLL_VREGTUNE 0x7ffffU +#define V_STATIC_U_PLL_VREGTUNE(x) ((x) << S_STATIC_U_PLL_VREGTUNE) +#define G_STATIC_U_PLL_VREGTUNE(x) (((x) >> S_STATIC_U_PLL_VREGTUNE) & M_STATIC_U_PLL_VREGTUNE) + +#define A_DBG_STATIC_C_PLL_CONF6 0x6154 + +#define S_STATIC_C_PLL_VREGTUNE 0 +#define M_STATIC_C_PLL_VREGTUNE 0x7ffffU +#define V_STATIC_C_PLL_VREGTUNE(x) ((x) << S_STATIC_C_PLL_VREGTUNE) +#define G_STATIC_C_PLL_VREGTUNE(x) (((x) >> S_STATIC_C_PLL_VREGTUNE) & 
M_STATIC_C_PLL_VREGTUNE) + +#define A_DBG_CUST_EFUSE_PROGRAM 0x6158 + +#define S_EFUSE_PROG_PERIOD 16 +#define M_EFUSE_PROG_PERIOD 0xffffU +#define V_EFUSE_PROG_PERIOD(x) ((x) << S_EFUSE_PROG_PERIOD) +#define G_EFUSE_PROG_PERIOD(x) (((x) >> S_EFUSE_PROG_PERIOD) & M_EFUSE_PROG_PERIOD) + +#define S_EFUSE_OPER_TYP 14 +#define M_EFUSE_OPER_TYP 0x3U +#define V_EFUSE_OPER_TYP(x) ((x) << S_EFUSE_OPER_TYP) +#define G_EFUSE_OPER_TYP(x) (((x) >> S_EFUSE_OPER_TYP) & M_EFUSE_OPER_TYP) + +#define S_EFUSE_ADDR 8 +#define M_EFUSE_ADDR 0x3fU +#define V_EFUSE_ADDR(x) ((x) << S_EFUSE_ADDR) +#define G_EFUSE_ADDR(x) (((x) >> S_EFUSE_ADDR) & M_EFUSE_ADDR) + +#define S_EFUSE_DIN 0 +#define M_EFUSE_DIN 0xffU +#define V_EFUSE_DIN(x) ((x) << S_EFUSE_DIN) +#define G_EFUSE_DIN(x) (((x) >> S_EFUSE_DIN) & M_EFUSE_DIN) + +#define A_DBG_CUST_EFUSE_OUT 0x615c + +#define S_EFUSE_OPER_DONE 8 +#define V_EFUSE_OPER_DONE(x) ((x) << S_EFUSE_OPER_DONE) +#define F_EFUSE_OPER_DONE V_EFUSE_OPER_DONE(1U) + +#define S_EFUSE_DOUT 0 +#define M_EFUSE_DOUT 0xffU +#define V_EFUSE_DOUT(x) ((x) << S_EFUSE_DOUT) +#define G_EFUSE_DOUT(x) (((x) >> S_EFUSE_DOUT) & M_EFUSE_DOUT) + +#define A_DBG_CUST_EFUSE_BYTE0_3 0x6160 +#define A_DBG_CUST_EFUSE_BYTE4_7 0x6164 +#define A_DBG_CUST_EFUSE_BYTE8_11 0x6168 +#define A_DBG_CUST_EFUSE_BYTE12_15 0x616c +#define A_DBG_CUST_EFUSE_BYTE16_19 0x6170 +#define A_DBG_CUST_EFUSE_BYTE20_23 0x6174 +#define A_DBG_CUST_EFUSE_BYTE24_27 0x6178 +#define A_DBG_CUST_EFUSE_BYTE28_31 0x617c +#define A_DBG_CUST_EFUSE_BYTE32_35 0x6180 +#define A_DBG_CUST_EFUSE_BYTE36_39 0x6184 +#define A_DBG_CUST_EFUSE_BYTE40_43 0x6188 +#define A_DBG_CUST_EFUSE_BYTE44_47 0x618c +#define A_DBG_CUST_EFUSE_BYTE48_51 0x6190 +#define A_DBG_CUST_EFUSE_BYTE52_55 0x6194 +#define A_DBG_CUST_EFUSE_BYTE56_59 0x6198 +#define A_DBG_CUST_EFUSE_BYTE60_63 0x619c + /* registers for module MC */ #define MC_BASE_ADDR 0x6200 @@ -11127,6 +16187,19 @@ #define V_EXT_MEM_PAGE_SIZE1(x) ((x) << S_EXT_MEM_PAGE_SIZE1) #define 
G_EXT_MEM_PAGE_SIZE1(x) (((x) >> S_EXT_MEM_PAGE_SIZE1) & M_EXT_MEM_PAGE_SIZE1) +#define S_BRBC_MODE 4 +#define V_BRBC_MODE(x) ((x) << S_BRBC_MODE) +#define F_BRBC_MODE V_BRBC_MODE(1U) + +#define S_T6_BRC_MODE 3 +#define V_T6_BRC_MODE(x) ((x) << S_T6_BRC_MODE) +#define F_T6_BRC_MODE V_T6_BRC_MODE(1U) + +#define S_T6_EXT_MEM_PAGE_SIZE 0 +#define M_T6_EXT_MEM_PAGE_SIZE 0x7U +#define V_T6_EXT_MEM_PAGE_SIZE(x) ((x) << S_T6_EXT_MEM_PAGE_SIZE) +#define G_T6_EXT_MEM_PAGE_SIZE(x) (((x) >> S_T6_EXT_MEM_PAGE_SIZE) & M_T6_EXT_MEM_PAGE_SIZE) + #define A_MA_ARB_CTRL 0x77d4 #define S_DIS_PAGE_HINT 1 @@ -11141,6 +16214,48 @@ #define V_DIS_BANK_FAIR(x) ((x) << S_DIS_BANK_FAIR) #define F_DIS_BANK_FAIR V_DIS_BANK_FAIR(1U) +#define S_HMA_WRT_EN 26 +#define V_HMA_WRT_EN(x) ((x) << S_HMA_WRT_EN) +#define F_HMA_WRT_EN V_HMA_WRT_EN(1U) + +#define S_HMA_NUM_PG_128B_FDBK 21 +#define M_HMA_NUM_PG_128B_FDBK 0x1fU +#define V_HMA_NUM_PG_128B_FDBK(x) ((x) << S_HMA_NUM_PG_128B_FDBK) +#define G_HMA_NUM_PG_128B_FDBK(x) (((x) >> S_HMA_NUM_PG_128B_FDBK) & M_HMA_NUM_PG_128B_FDBK) + +#define S_HMA_DIS_128B_PG_CNT_FDBK 20 +#define V_HMA_DIS_128B_PG_CNT_FDBK(x) ((x) << S_HMA_DIS_128B_PG_CNT_FDBK) +#define F_HMA_DIS_128B_PG_CNT_FDBK V_HMA_DIS_128B_PG_CNT_FDBK(1U) + +#define S_HMA_DIS_BG_ARB 19 +#define V_HMA_DIS_BG_ARB(x) ((x) << S_HMA_DIS_BG_ARB) +#define F_HMA_DIS_BG_ARB V_HMA_DIS_BG_ARB(1U) + +#define S_HMA_DIS_BANK_FAIR 18 +#define V_HMA_DIS_BANK_FAIR(x) ((x) << S_HMA_DIS_BANK_FAIR) +#define F_HMA_DIS_BANK_FAIR V_HMA_DIS_BANK_FAIR(1U) + +#define S_HMA_DIS_PAGE_HINT 17 +#define V_HMA_DIS_PAGE_HINT(x) ((x) << S_HMA_DIS_PAGE_HINT) +#define F_HMA_DIS_PAGE_HINT V_HMA_DIS_PAGE_HINT(1U) + +#define S_HMA_DIS_ADV_ARB 16 +#define V_HMA_DIS_ADV_ARB(x) ((x) << S_HMA_DIS_ADV_ARB) +#define F_HMA_DIS_ADV_ARB V_HMA_DIS_ADV_ARB(1U) + +#define S_NUM_PG_128B_FDBK 5 +#define M_NUM_PG_128B_FDBK 0x1fU +#define V_NUM_PG_128B_FDBK(x) ((x) << S_NUM_PG_128B_FDBK) +#define G_NUM_PG_128B_FDBK(x) (((x) >> S_NUM_PG_128B_FDBK) & 
M_NUM_PG_128B_FDBK) + +#define S_DIS_128B_PG_CNT_FDBK 4 +#define V_DIS_128B_PG_CNT_FDBK(x) ((x) << S_DIS_128B_PG_CNT_FDBK) +#define F_DIS_128B_PG_CNT_FDBK V_DIS_128B_PG_CNT_FDBK(1U) + +#define S_DIS_BG_ARB 3 +#define V_DIS_BG_ARB(x) ((x) << S_DIS_BG_ARB) +#define F_DIS_BG_ARB V_DIS_BG_ARB(1U) + #define A_MA_TARGET_MEM_ENABLE 0x77d8 #define S_HMA_ENABLE 3 @@ -11171,6 +16286,10 @@ #define V_EXT_MEM0_ENABLE(x) ((x) << S_EXT_MEM0_ENABLE) #define F_EXT_MEM0_ENABLE V_EXT_MEM0_ENABLE(1U) +#define S_MC_SPLIT 6 +#define V_MC_SPLIT(x) ((x) << S_MC_SPLIT) +#define F_MC_SPLIT V_MC_SPLIT(1U) + #define A_MA_INT_ENABLE 0x77dc #define S_MEM_PERR_INT_ENABLE 1 @@ -11519,6 +16638,11 @@ #define V_FUTURE_EXPANSION(x) ((x) << S_FUTURE_EXPANSION) #define G_FUTURE_EXPANSION(x) (((x) >> S_FUTURE_EXPANSION) & M_FUTURE_EXPANSION) +#define S_FUTURE_EXPANSION_EE 1 +#define M_FUTURE_EXPANSION_EE 0x7fffffffU +#define V_FUTURE_EXPANSION_EE(x) ((x) << S_FUTURE_EXPANSION_EE) +#define G_FUTURE_EXPANSION_EE(x) (((x) >> S_FUTURE_EXPANSION_EE) & M_FUTURE_EXPANSION_EE) + #define A_MA_PARITY_ERROR_ENABLE2 0x7800 #define S_ARB4_PAR_WRQUEUE_ERROR_EN 1 @@ -11590,7 +16714,24 @@ #define A_MA_PMRX_RDDATA_CNT 0x7874 #define A_MA_HMA_RDDATA_CNT 0x7878 #define A_MA_EDRAM0_WRDATA_CNT1 0x787c +#define A_MA_EXIT_ADDR_FAULT 0x787c + +#define S_EXIT_ADDR_FAULT 0 +#define V_EXIT_ADDR_FAULT(x) ((x) << S_EXIT_ADDR_FAULT) +#define F_EXIT_ADDR_FAULT V_EXIT_ADDR_FAULT(1U) + #define A_MA_EDRAM0_WRDATA_CNT0 0x7880 +#define A_MA_DDR_DEVICE_CFG 0x7880 + +#define S_MEM_WIDTH 1 +#define M_MEM_WIDTH 0x7U +#define V_MEM_WIDTH(x) ((x) << S_MEM_WIDTH) +#define G_MEM_WIDTH(x) (((x) >> S_MEM_WIDTH) & M_MEM_WIDTH) + +#define S_DDR_MODE 0 +#define V_DDR_MODE(x) ((x) << S_DDR_MODE) +#define F_DDR_MODE V_DDR_MODE(1U) + #define A_MA_EDRAM1_WRDATA_CNT1 0x7884 #define A_MA_EDRAM1_WRDATA_CNT0 0x7888 #define A_MA_EXT_MEMORY0_WRDATA_CNT1 0x788c @@ -11764,6 +16905,16 @@ #define V_CL0_WR_DATA_TO_EN(x) ((x) << S_CL0_WR_DATA_TO_EN) #define 
F_CL0_WR_DATA_TO_EN V_CL0_WR_DATA_TO_EN(1U) +#define S_FUTURE_CEXPANSION_WTE 29 +#define M_FUTURE_CEXPANSION_WTE 0x7U +#define V_FUTURE_CEXPANSION_WTE(x) ((x) << S_FUTURE_CEXPANSION_WTE) +#define G_FUTURE_CEXPANSION_WTE(x) (((x) >> S_FUTURE_CEXPANSION_WTE) & M_FUTURE_CEXPANSION_WTE) + +#define S_FUTURE_DEXPANSION_WTE 13 +#define M_FUTURE_DEXPANSION_WTE 0x7U +#define V_FUTURE_DEXPANSION_WTE(x) ((x) << S_FUTURE_DEXPANSION_WTE) +#define G_FUTURE_DEXPANSION_WTE(x) (((x) >> S_FUTURE_DEXPANSION_WTE) & M_FUTURE_DEXPANSION_WTE) + #define A_MA_WRITE_TIMEOUT_ERROR_STATUS 0x78d8 #define S_CL12_WR_CMD_TO_ERROR 28 @@ -11870,6 +17021,16 @@ #define V_CL0_WR_DATA_TO_ERROR(x) ((x) << S_CL0_WR_DATA_TO_ERROR) #define F_CL0_WR_DATA_TO_ERROR V_CL0_WR_DATA_TO_ERROR(1U) +#define S_FUTURE_CEXPANSION_WTS 29 +#define M_FUTURE_CEXPANSION_WTS 0x7U +#define V_FUTURE_CEXPANSION_WTS(x) ((x) << S_FUTURE_CEXPANSION_WTS) +#define G_FUTURE_CEXPANSION_WTS(x) (((x) >> S_FUTURE_CEXPANSION_WTS) & M_FUTURE_CEXPANSION_WTS) + +#define S_FUTURE_DEXPANSION_WTS 13 +#define M_FUTURE_DEXPANSION_WTS 0x7U +#define V_FUTURE_DEXPANSION_WTS(x) ((x) << S_FUTURE_DEXPANSION_WTS) +#define G_FUTURE_DEXPANSION_WTS(x) (((x) >> S_FUTURE_DEXPANSION_WTS) & M_FUTURE_DEXPANSION_WTS) + #define A_MA_READ_TIMEOUT_ERROR_ENABLE 0x78dc #define S_CL12_RD_CMD_TO_EN 28 @@ -11976,6 +17137,16 @@ #define V_CL0_RD_DATA_TO_EN(x) ((x) << S_CL0_RD_DATA_TO_EN) #define F_CL0_RD_DATA_TO_EN V_CL0_RD_DATA_TO_EN(1U) +#define S_FUTURE_CEXPANSION_RTE 29 +#define M_FUTURE_CEXPANSION_RTE 0x7U +#define V_FUTURE_CEXPANSION_RTE(x) ((x) << S_FUTURE_CEXPANSION_RTE) +#define G_FUTURE_CEXPANSION_RTE(x) (((x) >> S_FUTURE_CEXPANSION_RTE) & M_FUTURE_CEXPANSION_RTE) + +#define S_FUTURE_DEXPANSION_RTE 13 +#define M_FUTURE_DEXPANSION_RTE 0x7U +#define V_FUTURE_DEXPANSION_RTE(x) ((x) << S_FUTURE_DEXPANSION_RTE) +#define G_FUTURE_DEXPANSION_RTE(x) (((x) >> S_FUTURE_DEXPANSION_RTE) & M_FUTURE_DEXPANSION_RTE) + #define A_MA_READ_TIMEOUT_ERROR_STATUS 0x78e0 #define 
S_CL12_RD_CMD_TO_ERROR 28 @@ -12082,6 +17253,16 @@ #define V_CL0_RD_DATA_TO_ERROR(x) ((x) << S_CL0_RD_DATA_TO_ERROR) #define F_CL0_RD_DATA_TO_ERROR V_CL0_RD_DATA_TO_ERROR(1U) +#define S_FUTURE_CEXPANSION_RTS 29 +#define M_FUTURE_CEXPANSION_RTS 0x7U +#define V_FUTURE_CEXPANSION_RTS(x) ((x) << S_FUTURE_CEXPANSION_RTS) +#define G_FUTURE_CEXPANSION_RTS(x) (((x) >> S_FUTURE_CEXPANSION_RTS) & M_FUTURE_CEXPANSION_RTS) + +#define S_FUTURE_DEXPANSION_RTS 13 +#define M_FUTURE_DEXPANSION_RTS 0x7U +#define V_FUTURE_DEXPANSION_RTS(x) ((x) << S_FUTURE_DEXPANSION_RTS) +#define G_FUTURE_DEXPANSION_RTS(x) (((x) >> S_FUTURE_DEXPANSION_RTS) & M_FUTURE_DEXPANSION_RTS) + #define A_MA_BKP_CNT_SEL 0x78e4 #define S_BKP_CNT_TYPE 30 @@ -12118,6 +17299,11 @@ #define A_MA_IF_PARITY_ERROR_ENABLE 0x78f0 +#define S_T5_FUTURE_DEXPANSION 13 +#define M_T5_FUTURE_DEXPANSION 0x7ffffU +#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION) +#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION) + #define S_CL12_IF_PAR_EN 12 #define V_CL12_IF_PAR_EN(x) ((x) << S_CL12_IF_PAR_EN) #define F_CL12_IF_PAR_EN V_CL12_IF_PAR_EN(1U) @@ -12170,8 +17356,18 @@ #define V_CL0_IF_PAR_EN(x) ((x) << S_CL0_IF_PAR_EN) #define F_CL0_IF_PAR_EN V_CL0_IF_PAR_EN(1U) +#define S_FUTURE_DEXPANSION_IPE 13 +#define M_FUTURE_DEXPANSION_IPE 0x7ffffU +#define V_FUTURE_DEXPANSION_IPE(x) ((x) << S_FUTURE_DEXPANSION_IPE) +#define G_FUTURE_DEXPANSION_IPE(x) (((x) >> S_FUTURE_DEXPANSION_IPE) & M_FUTURE_DEXPANSION_IPE) + #define A_MA_IF_PARITY_ERROR_STATUS 0x78f4 +#define S_T5_FUTURE_DEXPANSION 13 +#define M_T5_FUTURE_DEXPANSION 0x7ffffU +#define V_T5_FUTURE_DEXPANSION(x) ((x) << S_T5_FUTURE_DEXPANSION) +#define G_T5_FUTURE_DEXPANSION(x) (((x) >> S_T5_FUTURE_DEXPANSION) & M_T5_FUTURE_DEXPANSION) + #define S_CL12_IF_PAR_ERROR 12 #define V_CL12_IF_PAR_ERROR(x) ((x) << S_CL12_IF_PAR_ERROR) #define F_CL12_IF_PAR_ERROR V_CL12_IF_PAR_ERROR(1U) @@ -12224,6 +17420,11 @@ #define V_CL0_IF_PAR_ERROR(x) 
((x) << S_CL0_IF_PAR_ERROR) #define F_CL0_IF_PAR_ERROR V_CL0_IF_PAR_ERROR(1U) +#define S_FUTURE_DEXPANSION_IPS 13 +#define M_FUTURE_DEXPANSION_IPS 0x7ffffU +#define V_FUTURE_DEXPANSION_IPS(x) ((x) << S_FUTURE_DEXPANSION_IPS) +#define G_FUTURE_DEXPANSION_IPS(x) (((x) >> S_FUTURE_DEXPANSION_IPS) & M_FUTURE_DEXPANSION_IPS) + #define A_MA_LOCAL_DEBUG_CFG 0x78f8 #define S_DEBUG_OR 15 @@ -12244,6 +17445,2823 @@ #define G_DEBUGPAGE(x) (((x) >> S_DEBUGPAGE) & M_DEBUGPAGE) #define A_MA_LOCAL_DEBUG_RPT 0x78fc +#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa000 + +#define S_CMDVLD0 31 +#define V_CMDVLD0(x) ((x) << S_CMDVLD0) +#define F_CMDVLD0 V_CMDVLD0(1U) + +#define S_CMDRDY0 30 +#define V_CMDRDY0(x) ((x) << S_CMDRDY0) +#define F_CMDRDY0 V_CMDRDY0(1U) + +#define S_CMDTYPE0 29 +#define V_CMDTYPE0(x) ((x) << S_CMDTYPE0) +#define F_CMDTYPE0 V_CMDTYPE0(1U) + +#define S_CMDLEN0 21 +#define M_CMDLEN0 0xffU +#define V_CMDLEN0(x) ((x) << S_CMDLEN0) +#define G_CMDLEN0(x) (((x) >> S_CMDLEN0) & M_CMDLEN0) + +#define S_CMDADDR0 8 +#define M_CMDADDR0 0x1fffU +#define V_CMDADDR0(x) ((x) << S_CMDADDR0) +#define G_CMDADDR0(x) (((x) >> S_CMDADDR0) & M_CMDADDR0) + +#define S_WRDATAVLD0 7 +#define V_WRDATAVLD0(x) ((x) << S_WRDATAVLD0) +#define F_WRDATAVLD0 V_WRDATAVLD0(1U) + +#define S_WRDATARDY0 6 +#define V_WRDATARDY0(x) ((x) << S_WRDATARDY0) +#define F_WRDATARDY0 V_WRDATARDY0(1U) + +#define S_RDDATARDY0 5 +#define V_RDDATARDY0(x) ((x) << S_RDDATARDY0) +#define F_RDDATARDY0 V_RDDATARDY0(1U) + +#define S_RDDATAVLD0 4 +#define V_RDDATAVLD0(x) ((x) << S_RDDATAVLD0) +#define F_RDDATAVLD0 V_RDDATAVLD0(1U) + +#define S_RDDATA0 0 +#define M_RDDATA0 0xfU +#define V_RDDATA0(x) ((x) << S_RDDATA0) +#define G_RDDATA0(x) (((x) >> S_RDDATA0) & M_RDDATA0) + +#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_EXTERNAL 0xa001 + +#define S_CMDVLD1 31 +#define V_CMDVLD1(x) ((x) << S_CMDVLD1) +#define F_CMDVLD1 V_CMDVLD1(1U) + +#define S_CMDRDY1 30 +#define V_CMDRDY1(x) ((x) << S_CMDRDY1) +#define F_CMDRDY1 
V_CMDRDY1(1U) + +#define S_CMDTYPE1 29 +#define V_CMDTYPE1(x) ((x) << S_CMDTYPE1) +#define F_CMDTYPE1 V_CMDTYPE1(1U) + +#define S_CMDLEN1 21 +#define M_CMDLEN1 0xffU +#define V_CMDLEN1(x) ((x) << S_CMDLEN1) +#define G_CMDLEN1(x) (((x) >> S_CMDLEN1) & M_CMDLEN1) + +#define S_CMDADDR1 8 +#define M_CMDADDR1 0x1fffU +#define V_CMDADDR1(x) ((x) << S_CMDADDR1) +#define G_CMDADDR1(x) (((x) >> S_CMDADDR1) & M_CMDADDR1) + +#define S_WRDATAVLD1 7 +#define V_WRDATAVLD1(x) ((x) << S_WRDATAVLD1) +#define F_WRDATAVLD1 V_WRDATAVLD1(1U) + +#define S_WRDATARDY1 6 +#define V_WRDATARDY1(x) ((x) << S_WRDATARDY1) +#define F_WRDATARDY1 V_WRDATARDY1(1U) + +#define S_RDDATARDY1 5 +#define V_RDDATARDY1(x) ((x) << S_RDDATARDY1) +#define F_RDDATARDY1 V_RDDATARDY1(1U) + +#define S_RDDATAVLD1 4 +#define V_RDDATAVLD1(x) ((x) << S_RDDATAVLD1) +#define F_RDDATAVLD1 V_RDDATAVLD1(1U) + +#define S_RDDATA1 0 +#define M_RDDATA1 0xfU +#define V_RDDATA1(x) ((x) << S_RDDATA1) +#define G_RDDATA1(x) (((x) >> S_RDDATA1) & M_RDDATA1) + +#define A_MA_ULP_TX_CLIENT_INTERFACE_EXTERNAL 0xa002 + +#define S_CMDVLD2 31 +#define V_CMDVLD2(x) ((x) << S_CMDVLD2) +#define F_CMDVLD2 V_CMDVLD2(1U) + +#define S_CMDRDY2 30 +#define V_CMDRDY2(x) ((x) << S_CMDRDY2) +#define F_CMDRDY2 V_CMDRDY2(1U) + +#define S_CMDTYPE2 29 +#define V_CMDTYPE2(x) ((x) << S_CMDTYPE2) +#define F_CMDTYPE2 V_CMDTYPE2(1U) + +#define S_CMDLEN2 21 +#define M_CMDLEN2 0xffU +#define V_CMDLEN2(x) ((x) << S_CMDLEN2) +#define G_CMDLEN2(x) (((x) >> S_CMDLEN2) & M_CMDLEN2) + +#define S_CMDADDR2 8 +#define M_CMDADDR2 0x1fffU +#define V_CMDADDR2(x) ((x) << S_CMDADDR2) +#define G_CMDADDR2(x) (((x) >> S_CMDADDR2) & M_CMDADDR2) + +#define S_WRDATAVLD2 7 +#define V_WRDATAVLD2(x) ((x) << S_WRDATAVLD2) +#define F_WRDATAVLD2 V_WRDATAVLD2(1U) + +#define S_WRDATARDY2 6 +#define V_WRDATARDY2(x) ((x) << S_WRDATARDY2) +#define F_WRDATARDY2 V_WRDATARDY2(1U) + +#define S_RDDATARDY2 5 +#define V_RDDATARDY2(x) ((x) << S_RDDATARDY2) +#define F_RDDATARDY2 V_RDDATARDY2(1U) + 
+#define S_RDDATAVLD2 4 +#define V_RDDATAVLD2(x) ((x) << S_RDDATAVLD2) +#define F_RDDATAVLD2 V_RDDATAVLD2(1U) + +#define S_RDDATA2 0 +#define M_RDDATA2 0xfU +#define V_RDDATA2(x) ((x) << S_RDDATA2) +#define G_RDDATA2(x) (((x) >> S_RDDATA2) & M_RDDATA2) + +#define A_MA_ULP_RX_CLIENT_INTERFACE_EXTERNAL 0xa003 + +#define S_CMDVLD3 31 +#define V_CMDVLD3(x) ((x) << S_CMDVLD3) +#define F_CMDVLD3 V_CMDVLD3(1U) + +#define S_CMDRDY3 30 +#define V_CMDRDY3(x) ((x) << S_CMDRDY3) +#define F_CMDRDY3 V_CMDRDY3(1U) + +#define S_CMDTYPE3 29 +#define V_CMDTYPE3(x) ((x) << S_CMDTYPE3) +#define F_CMDTYPE3 V_CMDTYPE3(1U) + +#define S_CMDLEN3 21 +#define M_CMDLEN3 0xffU +#define V_CMDLEN3(x) ((x) << S_CMDLEN3) +#define G_CMDLEN3(x) (((x) >> S_CMDLEN3) & M_CMDLEN3) + +#define S_CMDADDR3 8 +#define M_CMDADDR3 0x1fffU +#define V_CMDADDR3(x) ((x) << S_CMDADDR3) +#define G_CMDADDR3(x) (((x) >> S_CMDADDR3) & M_CMDADDR3) + +#define S_WRDATAVLD3 7 +#define V_WRDATAVLD3(x) ((x) << S_WRDATAVLD3) +#define F_WRDATAVLD3 V_WRDATAVLD3(1U) + +#define S_WRDATARDY3 6 +#define V_WRDATARDY3(x) ((x) << S_WRDATARDY3) +#define F_WRDATARDY3 V_WRDATARDY3(1U) + +#define S_RDDATARDY3 5 +#define V_RDDATARDY3(x) ((x) << S_RDDATARDY3) +#define F_RDDATARDY3 V_RDDATARDY3(1U) + +#define S_RDDATAVLD3 4 +#define V_RDDATAVLD3(x) ((x) << S_RDDATAVLD3) +#define F_RDDATAVLD3 V_RDDATAVLD3(1U) + +#define S_RDDATA3 0 +#define M_RDDATA3 0xfU +#define V_RDDATA3(x) ((x) << S_RDDATA3) +#define G_RDDATA3(x) (((x) >> S_RDDATA3) & M_RDDATA3) + +#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_EXTERNAL 0xa004 + +#define S_CMDVLD4 31 +#define V_CMDVLD4(x) ((x) << S_CMDVLD4) +#define F_CMDVLD4 V_CMDVLD4(1U) + +#define S_CMDRDY4 30 +#define V_CMDRDY4(x) ((x) << S_CMDRDY4) +#define F_CMDRDY4 V_CMDRDY4(1U) + +#define S_CMDTYPE4 29 +#define V_CMDTYPE4(x) ((x) << S_CMDTYPE4) +#define F_CMDTYPE4 V_CMDTYPE4(1U) + +#define S_CMDLEN4 21 +#define M_CMDLEN4 0xffU +#define V_CMDLEN4(x) ((x) << S_CMDLEN4) +#define G_CMDLEN4(x) (((x) >> S_CMDLEN4) & M_CMDLEN4) 
+ +#define S_CMDADDR4 8 +#define M_CMDADDR4 0x1fffU +#define V_CMDADDR4(x) ((x) << S_CMDADDR4) +#define G_CMDADDR4(x) (((x) >> S_CMDADDR4) & M_CMDADDR4) + +#define S_WRDATAVLD4 7 +#define V_WRDATAVLD4(x) ((x) << S_WRDATAVLD4) +#define F_WRDATAVLD4 V_WRDATAVLD4(1U) + +#define S_WRDATARDY4 6 +#define V_WRDATARDY4(x) ((x) << S_WRDATARDY4) +#define F_WRDATARDY4 V_WRDATARDY4(1U) + +#define S_RDDATARDY4 5 +#define V_RDDATARDY4(x) ((x) << S_RDDATARDY4) +#define F_RDDATARDY4 V_RDDATARDY4(1U) + +#define S_RDDATAVLD4 4 +#define V_RDDATAVLD4(x) ((x) << S_RDDATAVLD4) +#define F_RDDATAVLD4 V_RDDATAVLD4(1U) + +#define S_RDDATA4 0 +#define M_RDDATA4 0xfU +#define V_RDDATA4(x) ((x) << S_RDDATA4) +#define G_RDDATA4(x) (((x) >> S_RDDATA4) & M_RDDATA4) + +#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_EXTERNAL 0xa005 + +#define S_CMDVLD5 31 +#define V_CMDVLD5(x) ((x) << S_CMDVLD5) +#define F_CMDVLD5 V_CMDVLD5(1U) + +#define S_CMDRDY5 30 +#define V_CMDRDY5(x) ((x) << S_CMDRDY5) +#define F_CMDRDY5 V_CMDRDY5(1U) + +#define S_CMDTYPE5 29 +#define V_CMDTYPE5(x) ((x) << S_CMDTYPE5) +#define F_CMDTYPE5 V_CMDTYPE5(1U) + +#define S_CMDLEN5 21 +#define M_CMDLEN5 0xffU +#define V_CMDLEN5(x) ((x) << S_CMDLEN5) +#define G_CMDLEN5(x) (((x) >> S_CMDLEN5) & M_CMDLEN5) + +#define S_CMDADDR5 8 +#define M_CMDADDR5 0x1fffU +#define V_CMDADDR5(x) ((x) << S_CMDADDR5) +#define G_CMDADDR5(x) (((x) >> S_CMDADDR5) & M_CMDADDR5) + +#define S_WRDATAVLD5 7 +#define V_WRDATAVLD5(x) ((x) << S_WRDATAVLD5) +#define F_WRDATAVLD5 V_WRDATAVLD5(1U) + +#define S_WRDATARDY5 6 +#define V_WRDATARDY5(x) ((x) << S_WRDATARDY5) +#define F_WRDATARDY5 V_WRDATARDY5(1U) + +#define S_RDDATARDY5 5 +#define V_RDDATARDY5(x) ((x) << S_RDDATARDY5) +#define F_RDDATARDY5 V_RDDATARDY5(1U) + +#define S_RDDATAVLD5 4 +#define V_RDDATAVLD5(x) ((x) << S_RDDATAVLD5) +#define F_RDDATAVLD5 V_RDDATAVLD5(1U) + +#define S_RDDATA5 0 +#define M_RDDATA5 0xfU +#define V_RDDATA5(x) ((x) << S_RDDATA5) +#define G_RDDATA5(x) (((x) >> S_RDDATA5) & M_RDDATA5) + 
+#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_EXTERNAL 0xa006 + +#define S_CMDVLD6 31 +#define V_CMDVLD6(x) ((x) << S_CMDVLD6) +#define F_CMDVLD6 V_CMDVLD6(1U) + +#define S_CMDRDY6 30 +#define V_CMDRDY6(x) ((x) << S_CMDRDY6) +#define F_CMDRDY6 V_CMDRDY6(1U) + +#define S_CMDTYPE6 29 +#define V_CMDTYPE6(x) ((x) << S_CMDTYPE6) +#define F_CMDTYPE6 V_CMDTYPE6(1U) + +#define S_CMDLEN6 21 +#define M_CMDLEN6 0xffU +#define V_CMDLEN6(x) ((x) << S_CMDLEN6) +#define G_CMDLEN6(x) (((x) >> S_CMDLEN6) & M_CMDLEN6) + +#define S_CMDADDR6 8 +#define M_CMDADDR6 0x1fffU +#define V_CMDADDR6(x) ((x) << S_CMDADDR6) +#define G_CMDADDR6(x) (((x) >> S_CMDADDR6) & M_CMDADDR6) + +#define S_WRDATAVLD6 7 +#define V_WRDATAVLD6(x) ((x) << S_WRDATAVLD6) +#define F_WRDATAVLD6 V_WRDATAVLD6(1U) + +#define S_WRDATARDY6 6 +#define V_WRDATARDY6(x) ((x) << S_WRDATARDY6) +#define F_WRDATARDY6 V_WRDATARDY6(1U) + +#define S_RDDATARDY6 5 +#define V_RDDATARDY6(x) ((x) << S_RDDATARDY6) +#define F_RDDATARDY6 V_RDDATARDY6(1U) + +#define S_RDDATAVLD6 4 +#define V_RDDATAVLD6(x) ((x) << S_RDDATAVLD6) +#define F_RDDATAVLD6 V_RDDATAVLD6(1U) + +#define S_RDDATA6 0 +#define M_RDDATA6 0xfU +#define V_RDDATA6(x) ((x) << S_RDDATA6) +#define G_RDDATA6(x) (((x) >> S_RDDATA6) & M_RDDATA6) + +#define A_MA_LE_CLIENT_INTERFACE_EXTERNAL 0xa007 + +#define S_CMDVLD7 31 +#define V_CMDVLD7(x) ((x) << S_CMDVLD7) +#define F_CMDVLD7 V_CMDVLD7(1U) + +#define S_CMDRDY7 30 +#define V_CMDRDY7(x) ((x) << S_CMDRDY7) +#define F_CMDRDY7 V_CMDRDY7(1U) + +#define S_CMDTYPE7 29 +#define V_CMDTYPE7(x) ((x) << S_CMDTYPE7) +#define F_CMDTYPE7 V_CMDTYPE7(1U) + +#define S_CMDLEN7 21 +#define M_CMDLEN7 0xffU +#define V_CMDLEN7(x) ((x) << S_CMDLEN7) +#define G_CMDLEN7(x) (((x) >> S_CMDLEN7) & M_CMDLEN7) + +#define S_CMDADDR7 8 +#define M_CMDADDR7 0x1fffU +#define V_CMDADDR7(x) ((x) << S_CMDADDR7) +#define G_CMDADDR7(x) (((x) >> S_CMDADDR7) & M_CMDADDR7) + +#define S_WRDATAVLD7 7 +#define V_WRDATAVLD7(x) ((x) << S_WRDATAVLD7) +#define F_WRDATAVLD7 
V_WRDATAVLD7(1U) + +#define S_WRDATARDY7 6 +#define V_WRDATARDY7(x) ((x) << S_WRDATARDY7) +#define F_WRDATARDY7 V_WRDATARDY7(1U) + +#define S_RDDATARDY7 5 +#define V_RDDATARDY7(x) ((x) << S_RDDATARDY7) +#define F_RDDATARDY7 V_RDDATARDY7(1U) + +#define S_RDDATAVLD7 4 +#define V_RDDATAVLD7(x) ((x) << S_RDDATAVLD7) +#define F_RDDATAVLD7 V_RDDATAVLD7(1U) + +#define S_RDDATA7 0 +#define M_RDDATA7 0xfU +#define V_RDDATA7(x) ((x) << S_RDDATA7) +#define G_RDDATA7(x) (((x) >> S_RDDATA7) & M_RDDATA7) + +#define A_MA_CIM_CLIENT_INTERFACE_EXTERNAL 0xa008 + +#define S_CMDVLD8 31 +#define V_CMDVLD8(x) ((x) << S_CMDVLD8) +#define F_CMDVLD8 V_CMDVLD8(1U) + +#define S_CMDRDY8 30 +#define V_CMDRDY8(x) ((x) << S_CMDRDY8) +#define F_CMDRDY8 V_CMDRDY8(1U) + +#define S_CMDTYPE8 29 +#define V_CMDTYPE8(x) ((x) << S_CMDTYPE8) +#define F_CMDTYPE8 V_CMDTYPE8(1U) + +#define S_CMDLEN8 21 +#define M_CMDLEN8 0xffU +#define V_CMDLEN8(x) ((x) << S_CMDLEN8) +#define G_CMDLEN8(x) (((x) >> S_CMDLEN8) & M_CMDLEN8) + +#define S_CMDADDR8 8 +#define M_CMDADDR8 0x1fffU +#define V_CMDADDR8(x) ((x) << S_CMDADDR8) +#define G_CMDADDR8(x) (((x) >> S_CMDADDR8) & M_CMDADDR8) + +#define S_WRDATAVLD8 7 +#define V_WRDATAVLD8(x) ((x) << S_WRDATAVLD8) +#define F_WRDATAVLD8 V_WRDATAVLD8(1U) + +#define S_WRDATARDY8 6 +#define V_WRDATARDY8(x) ((x) << S_WRDATARDY8) +#define F_WRDATARDY8 V_WRDATARDY8(1U) + +#define S_RDDATARDY8 5 +#define V_RDDATARDY8(x) ((x) << S_RDDATARDY8) +#define F_RDDATARDY8 V_RDDATARDY8(1U) + +#define S_RDDATAVLD8 4 +#define V_RDDATAVLD8(x) ((x) << S_RDDATAVLD8) +#define F_RDDATAVLD8 V_RDDATAVLD8(1U) + +#define S_RDDATA8 0 +#define M_RDDATA8 0xfU +#define V_RDDATA8(x) ((x) << S_RDDATA8) +#define G_RDDATA8(x) (((x) >> S_RDDATA8) & M_RDDATA8) + +#define A_MA_PCIE_CLIENT_INTERFACE_EXTERNAL 0xa009 + +#define S_CMDVLD9 31 +#define V_CMDVLD9(x) ((x) << S_CMDVLD9) +#define F_CMDVLD9 V_CMDVLD9(1U) + +#define S_CMDRDY9 30 +#define V_CMDRDY9(x) ((x) << S_CMDRDY9) +#define F_CMDRDY9 V_CMDRDY9(1U) + +#define 
S_CMDTYPE9 29 +#define V_CMDTYPE9(x) ((x) << S_CMDTYPE9) +#define F_CMDTYPE9 V_CMDTYPE9(1U) + +#define S_CMDLEN9 21 +#define M_CMDLEN9 0xffU +#define V_CMDLEN9(x) ((x) << S_CMDLEN9) +#define G_CMDLEN9(x) (((x) >> S_CMDLEN9) & M_CMDLEN9) + +#define S_CMDADDR9 8 +#define M_CMDADDR9 0x1fffU +#define V_CMDADDR9(x) ((x) << S_CMDADDR9) +#define G_CMDADDR9(x) (((x) >> S_CMDADDR9) & M_CMDADDR9) + +#define S_WRDATAVLD9 7 +#define V_WRDATAVLD9(x) ((x) << S_WRDATAVLD9) +#define F_WRDATAVLD9 V_WRDATAVLD9(1U) + +#define S_WRDATARDY9 6 +#define V_WRDATARDY9(x) ((x) << S_WRDATARDY9) +#define F_WRDATARDY9 V_WRDATARDY9(1U) + +#define S_RDDATARDY9 5 +#define V_RDDATARDY9(x) ((x) << S_RDDATARDY9) +#define F_RDDATARDY9 V_RDDATARDY9(1U) + +#define S_RDDATAVLD9 4 +#define V_RDDATAVLD9(x) ((x) << S_RDDATAVLD9) +#define F_RDDATAVLD9 V_RDDATAVLD9(1U) + +#define S_RDDATA9 0 +#define M_RDDATA9 0xfU +#define V_RDDATA9(x) ((x) << S_RDDATA9) +#define G_RDDATA9(x) (((x) >> S_RDDATA9) & M_RDDATA9) + +#define A_MA_PM_TX_CLIENT_INTERFACE_EXTERNAL 0xa00a + +#define S_CMDVLD10 31 +#define V_CMDVLD10(x) ((x) << S_CMDVLD10) +#define F_CMDVLD10 V_CMDVLD10(1U) + +#define S_CMDRDY10 30 +#define V_CMDRDY10(x) ((x) << S_CMDRDY10) +#define F_CMDRDY10 V_CMDRDY10(1U) + +#define S_CMDTYPE10 29 +#define V_CMDTYPE10(x) ((x) << S_CMDTYPE10) +#define F_CMDTYPE10 V_CMDTYPE10(1U) + +#define S_CMDLEN10 21 +#define M_CMDLEN10 0xffU +#define V_CMDLEN10(x) ((x) << S_CMDLEN10) +#define G_CMDLEN10(x) (((x) >> S_CMDLEN10) & M_CMDLEN10) + +#define S_CMDADDR10 8 +#define M_CMDADDR10 0x1fffU +#define V_CMDADDR10(x) ((x) << S_CMDADDR10) +#define G_CMDADDR10(x) (((x) >> S_CMDADDR10) & M_CMDADDR10) + +#define S_WRDATAVLD10 7 +#define V_WRDATAVLD10(x) ((x) << S_WRDATAVLD10) +#define F_WRDATAVLD10 V_WRDATAVLD10(1U) + +#define S_WRDATARDY10 6 +#define V_WRDATARDY10(x) ((x) << S_WRDATARDY10) +#define F_WRDATARDY10 V_WRDATARDY10(1U) + +#define S_RDDATARDY10 5 +#define V_RDDATARDY10(x) ((x) << S_RDDATARDY10) +#define F_RDDATARDY10 
V_RDDATARDY10(1U) + +#define S_RDDATAVLD10 4 +#define V_RDDATAVLD10(x) ((x) << S_RDDATAVLD10) +#define F_RDDATAVLD10 V_RDDATAVLD10(1U) + +#define S_RDDATA10 0 +#define M_RDDATA10 0xfU +#define V_RDDATA10(x) ((x) << S_RDDATA10) +#define G_RDDATA10(x) (((x) >> S_RDDATA10) & M_RDDATA10) + +#define A_MA_PM_RX_CLIENT_INTERFACE_EXTERNAL 0xa00b + +#define S_CMDVLD11 31 +#define V_CMDVLD11(x) ((x) << S_CMDVLD11) +#define F_CMDVLD11 V_CMDVLD11(1U) + +#define S_CMDRDY11 30 +#define V_CMDRDY11(x) ((x) << S_CMDRDY11) +#define F_CMDRDY11 V_CMDRDY11(1U) + +#define S_CMDTYPE11 29 +#define V_CMDTYPE11(x) ((x) << S_CMDTYPE11) +#define F_CMDTYPE11 V_CMDTYPE11(1U) + +#define S_CMDLEN11 21 +#define M_CMDLEN11 0xffU +#define V_CMDLEN11(x) ((x) << S_CMDLEN11) +#define G_CMDLEN11(x) (((x) >> S_CMDLEN11) & M_CMDLEN11) + +#define S_CMDADDR11 8 +#define M_CMDADDR11 0x1fffU +#define V_CMDADDR11(x) ((x) << S_CMDADDR11) +#define G_CMDADDR11(x) (((x) >> S_CMDADDR11) & M_CMDADDR11) + +#define S_WRDATAVLD11 7 +#define V_WRDATAVLD11(x) ((x) << S_WRDATAVLD11) +#define F_WRDATAVLD11 V_WRDATAVLD11(1U) + +#define S_WRDATARDY11 6 +#define V_WRDATARDY11(x) ((x) << S_WRDATARDY11) +#define F_WRDATARDY11 V_WRDATARDY11(1U) + +#define S_RDDATARDY11 5 +#define V_RDDATARDY11(x) ((x) << S_RDDATARDY11) +#define F_RDDATARDY11 V_RDDATARDY11(1U) + +#define S_RDDATAVLD11 4 +#define V_RDDATAVLD11(x) ((x) << S_RDDATAVLD11) +#define F_RDDATAVLD11 V_RDDATAVLD11(1U) + +#define S_RDDATA11 0 +#define M_RDDATA11 0xfU +#define V_RDDATA11(x) ((x) << S_RDDATA11) +#define G_RDDATA11(x) (((x) >> S_RDDATA11) & M_RDDATA11) + +#define A_MA_HMA_CLIENT_INTERFACE_EXTERNAL 0xa00c + +#define S_CMDVLD12 31 +#define V_CMDVLD12(x) ((x) << S_CMDVLD12) +#define F_CMDVLD12 V_CMDVLD12(1U) + +#define S_CMDRDY12 30 +#define V_CMDRDY12(x) ((x) << S_CMDRDY12) +#define F_CMDRDY12 V_CMDRDY12(1U) + +#define S_CMDTYPE12 29 +#define V_CMDTYPE12(x) ((x) << S_CMDTYPE12) +#define F_CMDTYPE12 V_CMDTYPE12(1U) + +#define S_CMDLEN12 21 +#define M_CMDLEN12 
0xffU +#define V_CMDLEN12(x) ((x) << S_CMDLEN12) +#define G_CMDLEN12(x) (((x) >> S_CMDLEN12) & M_CMDLEN12) + +#define S_CMDADDR12 8 +#define M_CMDADDR12 0x1fffU +#define V_CMDADDR12(x) ((x) << S_CMDADDR12) +#define G_CMDADDR12(x) (((x) >> S_CMDADDR12) & M_CMDADDR12) + +#define S_WRDATAVLD12 7 +#define V_WRDATAVLD12(x) ((x) << S_WRDATAVLD12) +#define F_WRDATAVLD12 V_WRDATAVLD12(1U) + +#define S_WRDATARDY12 6 +#define V_WRDATARDY12(x) ((x) << S_WRDATARDY12) +#define F_WRDATARDY12 V_WRDATARDY12(1U) + +#define S_RDDATARDY12 5 +#define V_RDDATARDY12(x) ((x) << S_RDDATARDY12) +#define F_RDDATARDY12 V_RDDATARDY12(1U) + +#define S_RDDATAVLD12 4 +#define V_RDDATAVLD12(x) ((x) << S_RDDATAVLD12) +#define F_RDDATAVLD12 V_RDDATAVLD12(1U) + +#define S_RDDATA12 0 +#define M_RDDATA12 0xfU +#define V_RDDATA12(x) ((x) << S_RDDATA12) +#define G_RDDATA12(x) (((x) >> S_RDDATA12) & M_RDDATA12) + +#define A_MA_TARGET_0_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00d + +#define S_CI0_ARB0_REQ 31 +#define V_CI0_ARB0_REQ(x) ((x) << S_CI0_ARB0_REQ) +#define F_CI0_ARB0_REQ V_CI0_ARB0_REQ(1U) + +#define S_ARB0_CI0_GNT 30 +#define V_ARB0_CI0_GNT(x) ((x) << S_ARB0_CI0_GNT) +#define F_ARB0_CI0_GNT V_ARB0_CI0_GNT(1U) + +#define S_CI0_DM0_WDATA_VLD 29 +#define V_CI0_DM0_WDATA_VLD(x) ((x) << S_CI0_DM0_WDATA_VLD) +#define F_CI0_DM0_WDATA_VLD V_CI0_DM0_WDATA_VLD(1U) + +#define S_DM0_CI0_RDATA_VLD 28 +#define V_DM0_CI0_RDATA_VLD(x) ((x) << S_DM0_CI0_RDATA_VLD) +#define F_DM0_CI0_RDATA_VLD V_DM0_CI0_RDATA_VLD(1U) + +#define S_CI1_ARB0_REQ 27 +#define V_CI1_ARB0_REQ(x) ((x) << S_CI1_ARB0_REQ) +#define F_CI1_ARB0_REQ V_CI1_ARB0_REQ(1U) + +#define S_ARB0_CI1_GNT 26 +#define V_ARB0_CI1_GNT(x) ((x) << S_ARB0_CI1_GNT) +#define F_ARB0_CI1_GNT V_ARB0_CI1_GNT(1U) + +#define S_CI1_DM0_WDATA_VLD 25 +#define V_CI1_DM0_WDATA_VLD(x) ((x) << S_CI1_DM0_WDATA_VLD) +#define F_CI1_DM0_WDATA_VLD V_CI1_DM0_WDATA_VLD(1U) + +#define S_DM0_CI1_RDATA_VLD 24 +#define V_DM0_CI1_RDATA_VLD(x) ((x) << S_DM0_CI1_RDATA_VLD) +#define 
F_DM0_CI1_RDATA_VLD V_DM0_CI1_RDATA_VLD(1U) + +#define S_CI2_ARB0_REQ 23 +#define V_CI2_ARB0_REQ(x) ((x) << S_CI2_ARB0_REQ) +#define F_CI2_ARB0_REQ V_CI2_ARB0_REQ(1U) + +#define S_ARB0_CI2_GNT 22 +#define V_ARB0_CI2_GNT(x) ((x) << S_ARB0_CI2_GNT) +#define F_ARB0_CI2_GNT V_ARB0_CI2_GNT(1U) + +#define S_CI2_DM0_WDATA_VLD 21 +#define V_CI2_DM0_WDATA_VLD(x) ((x) << S_CI2_DM0_WDATA_VLD) +#define F_CI2_DM0_WDATA_VLD V_CI2_DM0_WDATA_VLD(1U) + +#define S_DM0_CI2_RDATA_VLD 20 +#define V_DM0_CI2_RDATA_VLD(x) ((x) << S_DM0_CI2_RDATA_VLD) +#define F_DM0_CI2_RDATA_VLD V_DM0_CI2_RDATA_VLD(1U) + +#define S_CI3_ARB0_REQ 19 +#define V_CI3_ARB0_REQ(x) ((x) << S_CI3_ARB0_REQ) +#define F_CI3_ARB0_REQ V_CI3_ARB0_REQ(1U) + +#define S_ARB0_CI3_GNT 18 +#define V_ARB0_CI3_GNT(x) ((x) << S_ARB0_CI3_GNT) +#define F_ARB0_CI3_GNT V_ARB0_CI3_GNT(1U) + +#define S_CI3_DM0_WDATA_VLD 17 +#define V_CI3_DM0_WDATA_VLD(x) ((x) << S_CI3_DM0_WDATA_VLD) +#define F_CI3_DM0_WDATA_VLD V_CI3_DM0_WDATA_VLD(1U) + +#define S_DM0_CI3_RDATA_VLD 16 +#define V_DM0_CI3_RDATA_VLD(x) ((x) << S_DM0_CI3_RDATA_VLD) +#define F_DM0_CI3_RDATA_VLD V_DM0_CI3_RDATA_VLD(1U) + +#define S_CI4_ARB0_REQ 15 +#define V_CI4_ARB0_REQ(x) ((x) << S_CI4_ARB0_REQ) +#define F_CI4_ARB0_REQ V_CI4_ARB0_REQ(1U) + +#define S_ARB0_CI4_GNT 14 +#define V_ARB0_CI4_GNT(x) ((x) << S_ARB0_CI4_GNT) +#define F_ARB0_CI4_GNT V_ARB0_CI4_GNT(1U) + +#define S_CI4_DM0_WDATA_VLD 13 +#define V_CI4_DM0_WDATA_VLD(x) ((x) << S_CI4_DM0_WDATA_VLD) +#define F_CI4_DM0_WDATA_VLD V_CI4_DM0_WDATA_VLD(1U) + +#define S_DM0_CI4_RDATA_VLD 12 +#define V_DM0_CI4_RDATA_VLD(x) ((x) << S_DM0_CI4_RDATA_VLD) +#define F_DM0_CI4_RDATA_VLD V_DM0_CI4_RDATA_VLD(1U) + +#define S_CI5_ARB0_REQ 11 +#define V_CI5_ARB0_REQ(x) ((x) << S_CI5_ARB0_REQ) +#define F_CI5_ARB0_REQ V_CI5_ARB0_REQ(1U) + +#define S_ARB0_CI5_GNT 10 +#define V_ARB0_CI5_GNT(x) ((x) << S_ARB0_CI5_GNT) +#define F_ARB0_CI5_GNT V_ARB0_CI5_GNT(1U) + +#define S_CI5_DM0_WDATA_VLD 9 +#define V_CI5_DM0_WDATA_VLD(x) ((x) << 
S_CI5_DM0_WDATA_VLD) +#define F_CI5_DM0_WDATA_VLD V_CI5_DM0_WDATA_VLD(1U) + +#define S_DM0_CI5_RDATA_VLD 8 +#define V_DM0_CI5_RDATA_VLD(x) ((x) << S_DM0_CI5_RDATA_VLD) +#define F_DM0_CI5_RDATA_VLD V_DM0_CI5_RDATA_VLD(1U) + +#define S_CI6_ARB0_REQ 7 +#define V_CI6_ARB0_REQ(x) ((x) << S_CI6_ARB0_REQ) +#define F_CI6_ARB0_REQ V_CI6_ARB0_REQ(1U) + +#define S_ARB0_CI6_GNT 6 +#define V_ARB0_CI6_GNT(x) ((x) << S_ARB0_CI6_GNT) +#define F_ARB0_CI6_GNT V_ARB0_CI6_GNT(1U) + +#define S_CI6_DM0_WDATA_VLD 5 +#define V_CI6_DM0_WDATA_VLD(x) ((x) << S_CI6_DM0_WDATA_VLD) +#define F_CI6_DM0_WDATA_VLD V_CI6_DM0_WDATA_VLD(1U) + +#define S_DM0_CI6_RDATA_VLD 4 +#define V_DM0_CI6_RDATA_VLD(x) ((x) << S_DM0_CI6_RDATA_VLD) +#define F_DM0_CI6_RDATA_VLD V_DM0_CI6_RDATA_VLD(1U) + +#define S_CI7_ARB0_REQ 3 +#define V_CI7_ARB0_REQ(x) ((x) << S_CI7_ARB0_REQ) +#define F_CI7_ARB0_REQ V_CI7_ARB0_REQ(1U) + +#define S_ARB0_CI7_GNT 2 +#define V_ARB0_CI7_GNT(x) ((x) << S_ARB0_CI7_GNT) +#define F_ARB0_CI7_GNT V_ARB0_CI7_GNT(1U) + +#define S_CI7_DM0_WDATA_VLD 1 +#define V_CI7_DM0_WDATA_VLD(x) ((x) << S_CI7_DM0_WDATA_VLD) +#define F_CI7_DM0_WDATA_VLD V_CI7_DM0_WDATA_VLD(1U) + +#define S_DM0_CI7_RDATA_VLD 0 +#define V_DM0_CI7_RDATA_VLD(x) ((x) << S_DM0_CI7_RDATA_VLD) +#define F_DM0_CI7_RDATA_VLD V_DM0_CI7_RDATA_VLD(1U) + +#define A_MA_TARGET_1_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00e + +#define S_CI0_ARB1_REQ 31 +#define V_CI0_ARB1_REQ(x) ((x) << S_CI0_ARB1_REQ) +#define F_CI0_ARB1_REQ V_CI0_ARB1_REQ(1U) + +#define S_ARB1_CI0_GNT 30 +#define V_ARB1_CI0_GNT(x) ((x) << S_ARB1_CI0_GNT) +#define F_ARB1_CI0_GNT V_ARB1_CI0_GNT(1U) + +#define S_CI0_DM1_WDATA_VLD 29 +#define V_CI0_DM1_WDATA_VLD(x) ((x) << S_CI0_DM1_WDATA_VLD) +#define F_CI0_DM1_WDATA_VLD V_CI0_DM1_WDATA_VLD(1U) + +#define S_DM1_CI0_RDATA_VLD 28 +#define V_DM1_CI0_RDATA_VLD(x) ((x) << S_DM1_CI0_RDATA_VLD) +#define F_DM1_CI0_RDATA_VLD V_DM1_CI0_RDATA_VLD(1U) + +#define S_CI1_ARB1_REQ 27 +#define V_CI1_ARB1_REQ(x) ((x) << S_CI1_ARB1_REQ) +#define 
F_CI1_ARB1_REQ V_CI1_ARB1_REQ(1U) + +#define S_ARB1_CI1_GNT 26 +#define V_ARB1_CI1_GNT(x) ((x) << S_ARB1_CI1_GNT) +#define F_ARB1_CI1_GNT V_ARB1_CI1_GNT(1U) + +#define S_CI1_DM1_WDATA_VLD 25 +#define V_CI1_DM1_WDATA_VLD(x) ((x) << S_CI1_DM1_WDATA_VLD) +#define F_CI1_DM1_WDATA_VLD V_CI1_DM1_WDATA_VLD(1U) + +#define S_DM1_CI1_RDATA_VLD 24 +#define V_DM1_CI1_RDATA_VLD(x) ((x) << S_DM1_CI1_RDATA_VLD) +#define F_DM1_CI1_RDATA_VLD V_DM1_CI1_RDATA_VLD(1U) + +#define S_CI2_ARB1_REQ 23 +#define V_CI2_ARB1_REQ(x) ((x) << S_CI2_ARB1_REQ) +#define F_CI2_ARB1_REQ V_CI2_ARB1_REQ(1U) + +#define S_ARB1_CI2_GNT 22 +#define V_ARB1_CI2_GNT(x) ((x) << S_ARB1_CI2_GNT) +#define F_ARB1_CI2_GNT V_ARB1_CI2_GNT(1U) + +#define S_CI2_DM1_WDATA_VLD 21 +#define V_CI2_DM1_WDATA_VLD(x) ((x) << S_CI2_DM1_WDATA_VLD) +#define F_CI2_DM1_WDATA_VLD V_CI2_DM1_WDATA_VLD(1U) + +#define S_DM1_CI2_RDATA_VLD 20 +#define V_DM1_CI2_RDATA_VLD(x) ((x) << S_DM1_CI2_RDATA_VLD) +#define F_DM1_CI2_RDATA_VLD V_DM1_CI2_RDATA_VLD(1U) + +#define S_CI3_ARB1_REQ 19 +#define V_CI3_ARB1_REQ(x) ((x) << S_CI3_ARB1_REQ) +#define F_CI3_ARB1_REQ V_CI3_ARB1_REQ(1U) + +#define S_ARB1_CI3_GNT 18 +#define V_ARB1_CI3_GNT(x) ((x) << S_ARB1_CI3_GNT) +#define F_ARB1_CI3_GNT V_ARB1_CI3_GNT(1U) + +#define S_CI3_DM1_WDATA_VLD 17 +#define V_CI3_DM1_WDATA_VLD(x) ((x) << S_CI3_DM1_WDATA_VLD) +#define F_CI3_DM1_WDATA_VLD V_CI3_DM1_WDATA_VLD(1U) + +#define S_DM1_CI3_RDATA_VLD 16 +#define V_DM1_CI3_RDATA_VLD(x) ((x) << S_DM1_CI3_RDATA_VLD) +#define F_DM1_CI3_RDATA_VLD V_DM1_CI3_RDATA_VLD(1U) + +#define S_CI4_ARB1_REQ 15 +#define V_CI4_ARB1_REQ(x) ((x) << S_CI4_ARB1_REQ) +#define F_CI4_ARB1_REQ V_CI4_ARB1_REQ(1U) + +#define S_ARB1_CI4_GNT 14 +#define V_ARB1_CI4_GNT(x) ((x) << S_ARB1_CI4_GNT) +#define F_ARB1_CI4_GNT V_ARB1_CI4_GNT(1U) + +#define S_CI4_DM1_WDATA_VLD 13 +#define V_CI4_DM1_WDATA_VLD(x) ((x) << S_CI4_DM1_WDATA_VLD) +#define F_CI4_DM1_WDATA_VLD V_CI4_DM1_WDATA_VLD(1U) + +#define S_DM1_CI4_RDATA_VLD 12 +#define V_DM1_CI4_RDATA_VLD(x) 
((x) << S_DM1_CI4_RDATA_VLD) +#define F_DM1_CI4_RDATA_VLD V_DM1_CI4_RDATA_VLD(1U) + +#define S_CI5_ARB1_REQ 11 +#define V_CI5_ARB1_REQ(x) ((x) << S_CI5_ARB1_REQ) +#define F_CI5_ARB1_REQ V_CI5_ARB1_REQ(1U) + +#define S_ARB1_CI5_GNT 10 +#define V_ARB1_CI5_GNT(x) ((x) << S_ARB1_CI5_GNT) +#define F_ARB1_CI5_GNT V_ARB1_CI5_GNT(1U) + +#define S_CI5_DM1_WDATA_VLD 9 +#define V_CI5_DM1_WDATA_VLD(x) ((x) << S_CI5_DM1_WDATA_VLD) +#define F_CI5_DM1_WDATA_VLD V_CI5_DM1_WDATA_VLD(1U) + +#define S_DM1_CI5_RDATA_VLD 8 +#define V_DM1_CI5_RDATA_VLD(x) ((x) << S_DM1_CI5_RDATA_VLD) +#define F_DM1_CI5_RDATA_VLD V_DM1_CI5_RDATA_VLD(1U) + +#define S_CI6_ARB1_REQ 7 +#define V_CI6_ARB1_REQ(x) ((x) << S_CI6_ARB1_REQ) +#define F_CI6_ARB1_REQ V_CI6_ARB1_REQ(1U) + +#define S_ARB1_CI6_GNT 6 +#define V_ARB1_CI6_GNT(x) ((x) << S_ARB1_CI6_GNT) +#define F_ARB1_CI6_GNT V_ARB1_CI6_GNT(1U) + +#define S_CI6_DM1_WDATA_VLD 5 +#define V_CI6_DM1_WDATA_VLD(x) ((x) << S_CI6_DM1_WDATA_VLD) +#define F_CI6_DM1_WDATA_VLD V_CI6_DM1_WDATA_VLD(1U) + +#define S_DM1_CI6_RDATA_VLD 4 +#define V_DM1_CI6_RDATA_VLD(x) ((x) << S_DM1_CI6_RDATA_VLD) +#define F_DM1_CI6_RDATA_VLD V_DM1_CI6_RDATA_VLD(1U) + +#define S_CI7_ARB1_REQ 3 +#define V_CI7_ARB1_REQ(x) ((x) << S_CI7_ARB1_REQ) +#define F_CI7_ARB1_REQ V_CI7_ARB1_REQ(1U) + +#define S_ARB1_CI7_GNT 2 +#define V_ARB1_CI7_GNT(x) ((x) << S_ARB1_CI7_GNT) +#define F_ARB1_CI7_GNT V_ARB1_CI7_GNT(1U) + +#define S_CI7_DM1_WDATA_VLD 1 +#define V_CI7_DM1_WDATA_VLD(x) ((x) << S_CI7_DM1_WDATA_VLD) +#define F_CI7_DM1_WDATA_VLD V_CI7_DM1_WDATA_VLD(1U) + +#define S_DM1_CI7_RDATA_VLD 0 +#define V_DM1_CI7_RDATA_VLD(x) ((x) << S_DM1_CI7_RDATA_VLD) +#define F_DM1_CI7_RDATA_VLD V_DM1_CI7_RDATA_VLD(1U) + +#define A_MA_TARGET_2_ARBITER_INTERFACE_EXTERNAL_REG0 0xa00f + +#define S_CI0_ARB2_REQ 31 +#define V_CI0_ARB2_REQ(x) ((x) << S_CI0_ARB2_REQ) +#define F_CI0_ARB2_REQ V_CI0_ARB2_REQ(1U) + +#define S_ARB2_CI0_GNT 30 +#define V_ARB2_CI0_GNT(x) ((x) << S_ARB2_CI0_GNT) +#define F_ARB2_CI0_GNT 
V_ARB2_CI0_GNT(1U) + +#define S_CI0_DM2_WDATA_VLD 29 +#define V_CI0_DM2_WDATA_VLD(x) ((x) << S_CI0_DM2_WDATA_VLD) +#define F_CI0_DM2_WDATA_VLD V_CI0_DM2_WDATA_VLD(1U) + +#define S_DM2_CI0_RDATA_VLD 28 +#define V_DM2_CI0_RDATA_VLD(x) ((x) << S_DM2_CI0_RDATA_VLD) +#define F_DM2_CI0_RDATA_VLD V_DM2_CI0_RDATA_VLD(1U) + +#define S_CI1_ARB2_REQ 27 +#define V_CI1_ARB2_REQ(x) ((x) << S_CI1_ARB2_REQ) +#define F_CI1_ARB2_REQ V_CI1_ARB2_REQ(1U) + +#define S_ARB2_CI1_GNT 26 +#define V_ARB2_CI1_GNT(x) ((x) << S_ARB2_CI1_GNT) +#define F_ARB2_CI1_GNT V_ARB2_CI1_GNT(1U) + +#define S_CI1_DM2_WDATA_VLD 25 +#define V_CI1_DM2_WDATA_VLD(x) ((x) << S_CI1_DM2_WDATA_VLD) +#define F_CI1_DM2_WDATA_VLD V_CI1_DM2_WDATA_VLD(1U) + +#define S_DM2_CI1_RDATA_VLD 24 +#define V_DM2_CI1_RDATA_VLD(x) ((x) << S_DM2_CI1_RDATA_VLD) +#define F_DM2_CI1_RDATA_VLD V_DM2_CI1_RDATA_VLD(1U) + +#define S_CI2_ARB2_REQ 23 +#define V_CI2_ARB2_REQ(x) ((x) << S_CI2_ARB2_REQ) +#define F_CI2_ARB2_REQ V_CI2_ARB2_REQ(1U) + +#define S_ARB2_CI2_GNT 22 +#define V_ARB2_CI2_GNT(x) ((x) << S_ARB2_CI2_GNT) +#define F_ARB2_CI2_GNT V_ARB2_CI2_GNT(1U) + +#define S_CI2_DM2_WDATA_VLD 21 +#define V_CI2_DM2_WDATA_VLD(x) ((x) << S_CI2_DM2_WDATA_VLD) +#define F_CI2_DM2_WDATA_VLD V_CI2_DM2_WDATA_VLD(1U) + +#define S_DM2_CI2_RDATA_VLD 20 +#define V_DM2_CI2_RDATA_VLD(x) ((x) << S_DM2_CI2_RDATA_VLD) +#define F_DM2_CI2_RDATA_VLD V_DM2_CI2_RDATA_VLD(1U) + +#define S_CI3_ARB2_REQ 19 +#define V_CI3_ARB2_REQ(x) ((x) << S_CI3_ARB2_REQ) +#define F_CI3_ARB2_REQ V_CI3_ARB2_REQ(1U) + +#define S_ARB2_CI3_GNT 18 +#define V_ARB2_CI3_GNT(x) ((x) << S_ARB2_CI3_GNT) +#define F_ARB2_CI3_GNT V_ARB2_CI3_GNT(1U) + +#define S_CI3_DM2_WDATA_VLD 17 +#define V_CI3_DM2_WDATA_VLD(x) ((x) << S_CI3_DM2_WDATA_VLD) +#define F_CI3_DM2_WDATA_VLD V_CI3_DM2_WDATA_VLD(1U) + +#define S_DM2_CI3_RDATA_VLD 16 +#define V_DM2_CI3_RDATA_VLD(x) ((x) << S_DM2_CI3_RDATA_VLD) +#define F_DM2_CI3_RDATA_VLD V_DM2_CI3_RDATA_VLD(1U) + +#define S_CI4_ARB2_REQ 15 +#define V_CI4_ARB2_REQ(x) 
((x) << S_CI4_ARB2_REQ) +#define F_CI4_ARB2_REQ V_CI4_ARB2_REQ(1U) + +#define S_ARB2_CI4_GNT 14 +#define V_ARB2_CI4_GNT(x) ((x) << S_ARB2_CI4_GNT) +#define F_ARB2_CI4_GNT V_ARB2_CI4_GNT(1U) + +#define S_CI4_DM2_WDATA_VLD 13 +#define V_CI4_DM2_WDATA_VLD(x) ((x) << S_CI4_DM2_WDATA_VLD) +#define F_CI4_DM2_WDATA_VLD V_CI4_DM2_WDATA_VLD(1U) + +#define S_DM2_CI4_RDATA_VLD 12 +#define V_DM2_CI4_RDATA_VLD(x) ((x) << S_DM2_CI4_RDATA_VLD) +#define F_DM2_CI4_RDATA_VLD V_DM2_CI4_RDATA_VLD(1U) + +#define S_CI5_ARB2_REQ 11 +#define V_CI5_ARB2_REQ(x) ((x) << S_CI5_ARB2_REQ) +#define F_CI5_ARB2_REQ V_CI5_ARB2_REQ(1U) + +#define S_ARB2_CI5_GNT 10 +#define V_ARB2_CI5_GNT(x) ((x) << S_ARB2_CI5_GNT) +#define F_ARB2_CI5_GNT V_ARB2_CI5_GNT(1U) + +#define S_CI5_DM2_WDATA_VLD 9 +#define V_CI5_DM2_WDATA_VLD(x) ((x) << S_CI5_DM2_WDATA_VLD) +#define F_CI5_DM2_WDATA_VLD V_CI5_DM2_WDATA_VLD(1U) + +#define S_DM2_CI5_RDATA_VLD 8 +#define V_DM2_CI5_RDATA_VLD(x) ((x) << S_DM2_CI5_RDATA_VLD) +#define F_DM2_CI5_RDATA_VLD V_DM2_CI5_RDATA_VLD(1U) + +#define S_CI6_ARB2_REQ 7 +#define V_CI6_ARB2_REQ(x) ((x) << S_CI6_ARB2_REQ) +#define F_CI6_ARB2_REQ V_CI6_ARB2_REQ(1U) + +#define S_ARB2_CI6_GNT 6 +#define V_ARB2_CI6_GNT(x) ((x) << S_ARB2_CI6_GNT) +#define F_ARB2_CI6_GNT V_ARB2_CI6_GNT(1U) + +#define S_CI6_DM2_WDATA_VLD 5 +#define V_CI6_DM2_WDATA_VLD(x) ((x) << S_CI6_DM2_WDATA_VLD) +#define F_CI6_DM2_WDATA_VLD V_CI6_DM2_WDATA_VLD(1U) + +#define S_DM2_CI6_RDATA_VLD 4 +#define V_DM2_CI6_RDATA_VLD(x) ((x) << S_DM2_CI6_RDATA_VLD) +#define F_DM2_CI6_RDATA_VLD V_DM2_CI6_RDATA_VLD(1U) + +#define S_CI7_ARB2_REQ 3 +#define V_CI7_ARB2_REQ(x) ((x) << S_CI7_ARB2_REQ) +#define F_CI7_ARB2_REQ V_CI7_ARB2_REQ(1U) + +#define S_ARB2_CI7_GNT 2 +#define V_ARB2_CI7_GNT(x) ((x) << S_ARB2_CI7_GNT) +#define F_ARB2_CI7_GNT V_ARB2_CI7_GNT(1U) + +#define S_CI7_DM2_WDATA_VLD 1 +#define V_CI7_DM2_WDATA_VLD(x) ((x) << S_CI7_DM2_WDATA_VLD) +#define F_CI7_DM2_WDATA_VLD V_CI7_DM2_WDATA_VLD(1U) + +#define S_DM2_CI7_RDATA_VLD 0 +#define 
V_DM2_CI7_RDATA_VLD(x) ((x) << S_DM2_CI7_RDATA_VLD) +#define F_DM2_CI7_RDATA_VLD V_DM2_CI7_RDATA_VLD(1U) + +#define A_MA_TARGET_3_ARBITER_INTERFACE_EXTERNAL_REG0 0xa010 + +#define S_CI0_ARB3_REQ 31 +#define V_CI0_ARB3_REQ(x) ((x) << S_CI0_ARB3_REQ) +#define F_CI0_ARB3_REQ V_CI0_ARB3_REQ(1U) + +#define S_ARB3_CI0_GNT 30 +#define V_ARB3_CI0_GNT(x) ((x) << S_ARB3_CI0_GNT) +#define F_ARB3_CI0_GNT V_ARB3_CI0_GNT(1U) + +#define S_CI0_DM3_WDATA_VLD 29 +#define V_CI0_DM3_WDATA_VLD(x) ((x) << S_CI0_DM3_WDATA_VLD) +#define F_CI0_DM3_WDATA_VLD V_CI0_DM3_WDATA_VLD(1U) + +#define S_DM3_CI0_RDATA_VLD 28 +#define V_DM3_CI0_RDATA_VLD(x) ((x) << S_DM3_CI0_RDATA_VLD) +#define F_DM3_CI0_RDATA_VLD V_DM3_CI0_RDATA_VLD(1U) + +#define S_CI1_ARB3_REQ 27 +#define V_CI1_ARB3_REQ(x) ((x) << S_CI1_ARB3_REQ) +#define F_CI1_ARB3_REQ V_CI1_ARB3_REQ(1U) + +#define S_ARB3_CI1_GNT 26 +#define V_ARB3_CI1_GNT(x) ((x) << S_ARB3_CI1_GNT) +#define F_ARB3_CI1_GNT V_ARB3_CI1_GNT(1U) + +#define S_CI1_DM3_WDATA_VLD 25 +#define V_CI1_DM3_WDATA_VLD(x) ((x) << S_CI1_DM3_WDATA_VLD) +#define F_CI1_DM3_WDATA_VLD V_CI1_DM3_WDATA_VLD(1U) + +#define S_DM3_CI1_RDATA_VLD 24 +#define V_DM3_CI1_RDATA_VLD(x) ((x) << S_DM3_CI1_RDATA_VLD) +#define F_DM3_CI1_RDATA_VLD V_DM3_CI1_RDATA_VLD(1U) + +#define S_CI2_ARB3_REQ 23 +#define V_CI2_ARB3_REQ(x) ((x) << S_CI2_ARB3_REQ) +#define F_CI2_ARB3_REQ V_CI2_ARB3_REQ(1U) + +#define S_ARB3_CI2_GNT 22 +#define V_ARB3_CI2_GNT(x) ((x) << S_ARB3_CI2_GNT) +#define F_ARB3_CI2_GNT V_ARB3_CI2_GNT(1U) + +#define S_CI2_DM3_WDATA_VLD 21 +#define V_CI2_DM3_WDATA_VLD(x) ((x) << S_CI2_DM3_WDATA_VLD) +#define F_CI2_DM3_WDATA_VLD V_CI2_DM3_WDATA_VLD(1U) + +#define S_DM3_CI2_RDATA_VLD 20 +#define V_DM3_CI2_RDATA_VLD(x) ((x) << S_DM3_CI2_RDATA_VLD) +#define F_DM3_CI2_RDATA_VLD V_DM3_CI2_RDATA_VLD(1U) + +#define S_CI3_ARB3_REQ 19 +#define V_CI3_ARB3_REQ(x) ((x) << S_CI3_ARB3_REQ) +#define F_CI3_ARB3_REQ V_CI3_ARB3_REQ(1U) + +#define S_ARB3_CI3_GNT 18 +#define V_ARB3_CI3_GNT(x) ((x) << S_ARB3_CI3_GNT) 
+#define F_ARB3_CI3_GNT V_ARB3_CI3_GNT(1U) + +#define S_CI3_DM3_WDATA_VLD 17 +#define V_CI3_DM3_WDATA_VLD(x) ((x) << S_CI3_DM3_WDATA_VLD) +#define F_CI3_DM3_WDATA_VLD V_CI3_DM3_WDATA_VLD(1U) + +#define S_DM3_CI3_RDATA_VLD 16 +#define V_DM3_CI3_RDATA_VLD(x) ((x) << S_DM3_CI3_RDATA_VLD) +#define F_DM3_CI3_RDATA_VLD V_DM3_CI3_RDATA_VLD(1U) + +#define S_CI4_ARB3_REQ 15 +#define V_CI4_ARB3_REQ(x) ((x) << S_CI4_ARB3_REQ) +#define F_CI4_ARB3_REQ V_CI4_ARB3_REQ(1U) + +#define S_ARB3_CI4_GNT 14 +#define V_ARB3_CI4_GNT(x) ((x) << S_ARB3_CI4_GNT) +#define F_ARB3_CI4_GNT V_ARB3_CI4_GNT(1U) + +#define S_CI4_DM3_WDATA_VLD 13 +#define V_CI4_DM3_WDATA_VLD(x) ((x) << S_CI4_DM3_WDATA_VLD) +#define F_CI4_DM3_WDATA_VLD V_CI4_DM3_WDATA_VLD(1U) + +#define S_DM3_CI4_RDATA_VLD 12 +#define V_DM3_CI4_RDATA_VLD(x) ((x) << S_DM3_CI4_RDATA_VLD) +#define F_DM3_CI4_RDATA_VLD V_DM3_CI4_RDATA_VLD(1U) + +#define S_CI5_ARB3_REQ 11 +#define V_CI5_ARB3_REQ(x) ((x) << S_CI5_ARB3_REQ) +#define F_CI5_ARB3_REQ V_CI5_ARB3_REQ(1U) + +#define S_ARB3_CI5_GNT 10 +#define V_ARB3_CI5_GNT(x) ((x) << S_ARB3_CI5_GNT) +#define F_ARB3_CI5_GNT V_ARB3_CI5_GNT(1U) + +#define S_CI5_DM3_WDATA_VLD 9 +#define V_CI5_DM3_WDATA_VLD(x) ((x) << S_CI5_DM3_WDATA_VLD) +#define F_CI5_DM3_WDATA_VLD V_CI5_DM3_WDATA_VLD(1U) + +#define S_DM3_CI5_RDATA_VLD 8 +#define V_DM3_CI5_RDATA_VLD(x) ((x) << S_DM3_CI5_RDATA_VLD) +#define F_DM3_CI5_RDATA_VLD V_DM3_CI5_RDATA_VLD(1U) + +#define S_CI6_ARB3_REQ 7 +#define V_CI6_ARB3_REQ(x) ((x) << S_CI6_ARB3_REQ) +#define F_CI6_ARB3_REQ V_CI6_ARB3_REQ(1U) + +#define S_ARB3_CI6_GNT 6 +#define V_ARB3_CI6_GNT(x) ((x) << S_ARB3_CI6_GNT) +#define F_ARB3_CI6_GNT V_ARB3_CI6_GNT(1U) + +#define S_CI6_DM3_WDATA_VLD 5 +#define V_CI6_DM3_WDATA_VLD(x) ((x) << S_CI6_DM3_WDATA_VLD) +#define F_CI6_DM3_WDATA_VLD V_CI6_DM3_WDATA_VLD(1U) + +#define S_DM3_CI6_RDATA_VLD 4 +#define V_DM3_CI6_RDATA_VLD(x) ((x) << S_DM3_CI6_RDATA_VLD) +#define F_DM3_CI6_RDATA_VLD V_DM3_CI6_RDATA_VLD(1U) + +#define S_CI7_ARB3_REQ 3 +#define 
V_CI7_ARB3_REQ(x) ((x) << S_CI7_ARB3_REQ) +#define F_CI7_ARB3_REQ V_CI7_ARB3_REQ(1U) + +#define S_ARB3_CI7_GNT 2 +#define V_ARB3_CI7_GNT(x) ((x) << S_ARB3_CI7_GNT) +#define F_ARB3_CI7_GNT V_ARB3_CI7_GNT(1U) + +#define S_CI7_DM3_WDATA_VLD 1 +#define V_CI7_DM3_WDATA_VLD(x) ((x) << S_CI7_DM3_WDATA_VLD) +#define F_CI7_DM3_WDATA_VLD V_CI7_DM3_WDATA_VLD(1U) + +#define S_DM3_CI7_RDATA_VLD 0 +#define V_DM3_CI7_RDATA_VLD(x) ((x) << S_DM3_CI7_RDATA_VLD) +#define F_DM3_CI7_RDATA_VLD V_DM3_CI7_RDATA_VLD(1U) + +#define A_MA_MA_DEBUG_SIGNATURE_LTL_END 0xa011 +#define A_MA_MA_DEBUG_SIGNATURE_BIG_END_INVERSE 0xa012 +#define A_MA_TARGET_0_ARBITER_INTERFACE_EXTERNAL_REG1 0xa013 + +#define S_CI8_ARB0_REQ 31 +#define V_CI8_ARB0_REQ(x) ((x) << S_CI8_ARB0_REQ) +#define F_CI8_ARB0_REQ V_CI8_ARB0_REQ(1U) + +#define S_ARB0_CI8_GNT 30 +#define V_ARB0_CI8_GNT(x) ((x) << S_ARB0_CI8_GNT) +#define F_ARB0_CI8_GNT V_ARB0_CI8_GNT(1U) + +#define S_CI8_DM0_WDATA_VLD 29 +#define V_CI8_DM0_WDATA_VLD(x) ((x) << S_CI8_DM0_WDATA_VLD) +#define F_CI8_DM0_WDATA_VLD V_CI8_DM0_WDATA_VLD(1U) + +#define S_DM0_CI8_RDATA_VLD 28 +#define V_DM0_CI8_RDATA_VLD(x) ((x) << S_DM0_CI8_RDATA_VLD) +#define F_DM0_CI8_RDATA_VLD V_DM0_CI8_RDATA_VLD(1U) + +#define S_CI9_ARB0_REQ 27 +#define V_CI9_ARB0_REQ(x) ((x) << S_CI9_ARB0_REQ) +#define F_CI9_ARB0_REQ V_CI9_ARB0_REQ(1U) + +#define S_ARB0_CI9_GNT 26 +#define V_ARB0_CI9_GNT(x) ((x) << S_ARB0_CI9_GNT) +#define F_ARB0_CI9_GNT V_ARB0_CI9_GNT(1U) + +#define S_CI9_DM0_WDATA_VLD 25 +#define V_CI9_DM0_WDATA_VLD(x) ((x) << S_CI9_DM0_WDATA_VLD) +#define F_CI9_DM0_WDATA_VLD V_CI9_DM0_WDATA_VLD(1U) + +#define S_DM0_CI9_RDATA_VLD 24 +#define V_DM0_CI9_RDATA_VLD(x) ((x) << S_DM0_CI9_RDATA_VLD) +#define F_DM0_CI9_RDATA_VLD V_DM0_CI9_RDATA_VLD(1U) + +#define S_CI10_ARB0_REQ 23 +#define V_CI10_ARB0_REQ(x) ((x) << S_CI10_ARB0_REQ) +#define F_CI10_ARB0_REQ V_CI10_ARB0_REQ(1U) + +#define S_ARB0_CI10_GNT 22 +#define V_ARB0_CI10_GNT(x) ((x) << S_ARB0_CI10_GNT) +#define F_ARB0_CI10_GNT 
V_ARB0_CI10_GNT(1U) + +#define S_CI10_DM0_WDATA_VLD 21 +#define V_CI10_DM0_WDATA_VLD(x) ((x) << S_CI10_DM0_WDATA_VLD) +#define F_CI10_DM0_WDATA_VLD V_CI10_DM0_WDATA_VLD(1U) + +#define S_DM0_CI10_RDATA_VLD 20 +#define V_DM0_CI10_RDATA_VLD(x) ((x) << S_DM0_CI10_RDATA_VLD) +#define F_DM0_CI10_RDATA_VLD V_DM0_CI10_RDATA_VLD(1U) + +#define S_CI11_ARB0_REQ 19 +#define V_CI11_ARB0_REQ(x) ((x) << S_CI11_ARB0_REQ) +#define F_CI11_ARB0_REQ V_CI11_ARB0_REQ(1U) + +#define S_ARB0_CI11_GNT 18 +#define V_ARB0_CI11_GNT(x) ((x) << S_ARB0_CI11_GNT) +#define F_ARB0_CI11_GNT V_ARB0_CI11_GNT(1U) + +#define S_CI11_DM0_WDATA_VLD 17 +#define V_CI11_DM0_WDATA_VLD(x) ((x) << S_CI11_DM0_WDATA_VLD) +#define F_CI11_DM0_WDATA_VLD V_CI11_DM0_WDATA_VLD(1U) + +#define S_DM0_CI11_RDATA_VLD 16 +#define V_DM0_CI11_RDATA_VLD(x) ((x) << S_DM0_CI11_RDATA_VLD) +#define F_DM0_CI11_RDATA_VLD V_DM0_CI11_RDATA_VLD(1U) + +#define S_CI12_ARB0_REQ 15 +#define V_CI12_ARB0_REQ(x) ((x) << S_CI12_ARB0_REQ) +#define F_CI12_ARB0_REQ V_CI12_ARB0_REQ(1U) + +#define S_ARB0_CI12_GNT 14 +#define V_ARB0_CI12_GNT(x) ((x) << S_ARB0_CI12_GNT) +#define F_ARB0_CI12_GNT V_ARB0_CI12_GNT(1U) + +#define S_CI12_DM0_WDATA_VLD 13 +#define V_CI12_DM0_WDATA_VLD(x) ((x) << S_CI12_DM0_WDATA_VLD) +#define F_CI12_DM0_WDATA_VLD V_CI12_DM0_WDATA_VLD(1U) + +#define S_DM0_CI12_RDATA_VLD 12 +#define V_DM0_CI12_RDATA_VLD(x) ((x) << S_DM0_CI12_RDATA_VLD) +#define F_DM0_CI12_RDATA_VLD V_DM0_CI12_RDATA_VLD(1U) + +#define A_MA_TARGET_1_ARBITER_INTERFACE_EXTERNAL_REG1 0xa014 + +#define S_CI8_ARB1_REQ 31 +#define V_CI8_ARB1_REQ(x) ((x) << S_CI8_ARB1_REQ) +#define F_CI8_ARB1_REQ V_CI8_ARB1_REQ(1U) + +#define S_ARB1_CI8_GNT 30 +#define V_ARB1_CI8_GNT(x) ((x) << S_ARB1_CI8_GNT) +#define F_ARB1_CI8_GNT V_ARB1_CI8_GNT(1U) + +#define S_CI8_DM1_WDATA_VLD 29 +#define V_CI8_DM1_WDATA_VLD(x) ((x) << S_CI8_DM1_WDATA_VLD) +#define F_CI8_DM1_WDATA_VLD V_CI8_DM1_WDATA_VLD(1U) + +#define S_DM1_CI8_RDATA_VLD 28 +#define V_DM1_CI8_RDATA_VLD(x) ((x) << 
S_DM1_CI8_RDATA_VLD) +#define F_DM1_CI8_RDATA_VLD V_DM1_CI8_RDATA_VLD(1U) + +#define S_CI9_ARB1_REQ 27 +#define V_CI9_ARB1_REQ(x) ((x) << S_CI9_ARB1_REQ) +#define F_CI9_ARB1_REQ V_CI9_ARB1_REQ(1U) + +#define S_ARB1_CI9_GNT 26 +#define V_ARB1_CI9_GNT(x) ((x) << S_ARB1_CI9_GNT) +#define F_ARB1_CI9_GNT V_ARB1_CI9_GNT(1U) + +#define S_CI9_DM1_WDATA_VLD 25 +#define V_CI9_DM1_WDATA_VLD(x) ((x) << S_CI9_DM1_WDATA_VLD) +#define F_CI9_DM1_WDATA_VLD V_CI9_DM1_WDATA_VLD(1U) + +#define S_DM1_CI9_RDATA_VLD 24 +#define V_DM1_CI9_RDATA_VLD(x) ((x) << S_DM1_CI9_RDATA_VLD) +#define F_DM1_CI9_RDATA_VLD V_DM1_CI9_RDATA_VLD(1U) + +#define S_CI10_ARB1_REQ 23 +#define V_CI10_ARB1_REQ(x) ((x) << S_CI10_ARB1_REQ) +#define F_CI10_ARB1_REQ V_CI10_ARB1_REQ(1U) + +#define S_ARB1_CI10_GNT 22 +#define V_ARB1_CI10_GNT(x) ((x) << S_ARB1_CI10_GNT) +#define F_ARB1_CI10_GNT V_ARB1_CI10_GNT(1U) + +#define S_CI10_DM1_WDATA_VLD 21 +#define V_CI10_DM1_WDATA_VLD(x) ((x) << S_CI10_DM1_WDATA_VLD) +#define F_CI10_DM1_WDATA_VLD V_CI10_DM1_WDATA_VLD(1U) + +#define S_DM1_CI10_RDATA_VLD 20 +#define V_DM1_CI10_RDATA_VLD(x) ((x) << S_DM1_CI10_RDATA_VLD) +#define F_DM1_CI10_RDATA_VLD V_DM1_CI10_RDATA_VLD(1U) + +#define S_CI11_ARB1_REQ 19 +#define V_CI11_ARB1_REQ(x) ((x) << S_CI11_ARB1_REQ) +#define F_CI11_ARB1_REQ V_CI11_ARB1_REQ(1U) + +#define S_ARB1_CI11_GNT 18 +#define V_ARB1_CI11_GNT(x) ((x) << S_ARB1_CI11_GNT) +#define F_ARB1_CI11_GNT V_ARB1_CI11_GNT(1U) + +#define S_CI11_DM1_WDATA_VLD 17 +#define V_CI11_DM1_WDATA_VLD(x) ((x) << S_CI11_DM1_WDATA_VLD) +#define F_CI11_DM1_WDATA_VLD V_CI11_DM1_WDATA_VLD(1U) + +#define S_DM1_CI11_RDATA_VLD 16 +#define V_DM1_CI11_RDATA_VLD(x) ((x) << S_DM1_CI11_RDATA_VLD) +#define F_DM1_CI11_RDATA_VLD V_DM1_CI11_RDATA_VLD(1U) + +#define S_CI12_ARB1_REQ 15 +#define V_CI12_ARB1_REQ(x) ((x) << S_CI12_ARB1_REQ) +#define F_CI12_ARB1_REQ V_CI12_ARB1_REQ(1U) + +#define S_ARB1_CI12_GNT 14 +#define V_ARB1_CI12_GNT(x) ((x) << S_ARB1_CI12_GNT) +#define F_ARB1_CI12_GNT V_ARB1_CI12_GNT(1U) + 
+#define S_CI12_DM1_WDATA_VLD 13 +#define V_CI12_DM1_WDATA_VLD(x) ((x) << S_CI12_DM1_WDATA_VLD) +#define F_CI12_DM1_WDATA_VLD V_CI12_DM1_WDATA_VLD(1U) + +#define S_DM1_CI12_RDATA_VLD 12 +#define V_DM1_CI12_RDATA_VLD(x) ((x) << S_DM1_CI12_RDATA_VLD) +#define F_DM1_CI12_RDATA_VLD V_DM1_CI12_RDATA_VLD(1U) + +#define A_MA_TARGET_2_ARBITER_INTERFACE_EXTERNAL_REG1 0xa015 + +#define S_CI8_ARB2_REQ 31 +#define V_CI8_ARB2_REQ(x) ((x) << S_CI8_ARB2_REQ) +#define F_CI8_ARB2_REQ V_CI8_ARB2_REQ(1U) + +#define S_ARB2_CI8_GNT 30 +#define V_ARB2_CI8_GNT(x) ((x) << S_ARB2_CI8_GNT) +#define F_ARB2_CI8_GNT V_ARB2_CI8_GNT(1U) + +#define S_CI8_DM2_WDATA_VLD 29 +#define V_CI8_DM2_WDATA_VLD(x) ((x) << S_CI8_DM2_WDATA_VLD) +#define F_CI8_DM2_WDATA_VLD V_CI8_DM2_WDATA_VLD(1U) + +#define S_DM2_CI8_RDATA_VLD 28 +#define V_DM2_CI8_RDATA_VLD(x) ((x) << S_DM2_CI8_RDATA_VLD) +#define F_DM2_CI8_RDATA_VLD V_DM2_CI8_RDATA_VLD(1U) + +#define S_CI9_ARB2_REQ 27 +#define V_CI9_ARB2_REQ(x) ((x) << S_CI9_ARB2_REQ) +#define F_CI9_ARB2_REQ V_CI9_ARB2_REQ(1U) + +#define S_ARB2_CI9_GNT 26 +#define V_ARB2_CI9_GNT(x) ((x) << S_ARB2_CI9_GNT) +#define F_ARB2_CI9_GNT V_ARB2_CI9_GNT(1U) + +#define S_CI9_DM2_WDATA_VLD 25 +#define V_CI9_DM2_WDATA_VLD(x) ((x) << S_CI9_DM2_WDATA_VLD) +#define F_CI9_DM2_WDATA_VLD V_CI9_DM2_WDATA_VLD(1U) + +#define S_DM2_CI9_RDATA_VLD 24 +#define V_DM2_CI9_RDATA_VLD(x) ((x) << S_DM2_CI9_RDATA_VLD) +#define F_DM2_CI9_RDATA_VLD V_DM2_CI9_RDATA_VLD(1U) + +#define S_CI10_ARB2_REQ 23 +#define V_CI10_ARB2_REQ(x) ((x) << S_CI10_ARB2_REQ) +#define F_CI10_ARB2_REQ V_CI10_ARB2_REQ(1U) + +#define S_ARB2_CI10_GNT 22 +#define V_ARB2_CI10_GNT(x) ((x) << S_ARB2_CI10_GNT) +#define F_ARB2_CI10_GNT V_ARB2_CI10_GNT(1U) + +#define S_CI10_DM2_WDATA_VLD 21 +#define V_CI10_DM2_WDATA_VLD(x) ((x) << S_CI10_DM2_WDATA_VLD) +#define F_CI10_DM2_WDATA_VLD V_CI10_DM2_WDATA_VLD(1U) + +#define S_DM2_CI10_RDATA_VLD 20 +#define V_DM2_CI10_RDATA_VLD(x) ((x) << S_DM2_CI10_RDATA_VLD) +#define F_DM2_CI10_RDATA_VLD 
V_DM2_CI10_RDATA_VLD(1U) + +#define S_CI11_ARB2_REQ 19 +#define V_CI11_ARB2_REQ(x) ((x) << S_CI11_ARB2_REQ) +#define F_CI11_ARB2_REQ V_CI11_ARB2_REQ(1U) + +#define S_ARB2_CI11_GNT 18 +#define V_ARB2_CI11_GNT(x) ((x) << S_ARB2_CI11_GNT) +#define F_ARB2_CI11_GNT V_ARB2_CI11_GNT(1U) + +#define S_CI11_DM2_WDATA_VLD 17 +#define V_CI11_DM2_WDATA_VLD(x) ((x) << S_CI11_DM2_WDATA_VLD) +#define F_CI11_DM2_WDATA_VLD V_CI11_DM2_WDATA_VLD(1U) + +#define S_DM2_CI11_RDATA_VLD 16 +#define V_DM2_CI11_RDATA_VLD(x) ((x) << S_DM2_CI11_RDATA_VLD) +#define F_DM2_CI11_RDATA_VLD V_DM2_CI11_RDATA_VLD(1U) + +#define S_CI12_ARB2_REQ 15 +#define V_CI12_ARB2_REQ(x) ((x) << S_CI12_ARB2_REQ) +#define F_CI12_ARB2_REQ V_CI12_ARB2_REQ(1U) + +#define S_ARB2_CI12_GNT 14 +#define V_ARB2_CI12_GNT(x) ((x) << S_ARB2_CI12_GNT) +#define F_ARB2_CI12_GNT V_ARB2_CI12_GNT(1U) + +#define S_CI12_DM2_WDATA_VLD 13 +#define V_CI12_DM2_WDATA_VLD(x) ((x) << S_CI12_DM2_WDATA_VLD) +#define F_CI12_DM2_WDATA_VLD V_CI12_DM2_WDATA_VLD(1U) + +#define S_DM2_CI12_RDATA_VLD 12 +#define V_DM2_CI12_RDATA_VLD(x) ((x) << S_DM2_CI12_RDATA_VLD) +#define F_DM2_CI12_RDATA_VLD V_DM2_CI12_RDATA_VLD(1U) + +#define A_MA_TARGET_3_ARBITER_INTERFACE_EXTERNAL_REG1 0xa016 + +#define S_CI8_ARB3_REQ 31 +#define V_CI8_ARB3_REQ(x) ((x) << S_CI8_ARB3_REQ) +#define F_CI8_ARB3_REQ V_CI8_ARB3_REQ(1U) + +#define S_ARB3_CI8_GNT 30 +#define V_ARB3_CI8_GNT(x) ((x) << S_ARB3_CI8_GNT) +#define F_ARB3_CI8_GNT V_ARB3_CI8_GNT(1U) + +#define S_CI8_DM3_WDATA_VLD 29 +#define V_CI8_DM3_WDATA_VLD(x) ((x) << S_CI8_DM3_WDATA_VLD) +#define F_CI8_DM3_WDATA_VLD V_CI8_DM3_WDATA_VLD(1U) + +#define S_DM3_CI8_RDATA_VLD 28 +#define V_DM3_CI8_RDATA_VLD(x) ((x) << S_DM3_CI8_RDATA_VLD) +#define F_DM3_CI8_RDATA_VLD V_DM3_CI8_RDATA_VLD(1U) + +#define S_CI9_ARB3_REQ 27 +#define V_CI9_ARB3_REQ(x) ((x) << S_CI9_ARB3_REQ) +#define F_CI9_ARB3_REQ V_CI9_ARB3_REQ(1U) + +#define S_ARB3_CI9_GNT 26 +#define V_ARB3_CI9_GNT(x) ((x) << S_ARB3_CI9_GNT) +#define F_ARB3_CI9_GNT 
V_ARB3_CI9_GNT(1U) + +#define S_CI9_DM3_WDATA_VLD 25 +#define V_CI9_DM3_WDATA_VLD(x) ((x) << S_CI9_DM3_WDATA_VLD) +#define F_CI9_DM3_WDATA_VLD V_CI9_DM3_WDATA_VLD(1U) + +#define S_DM3_CI9_RDATA_VLD 24 +#define V_DM3_CI9_RDATA_VLD(x) ((x) << S_DM3_CI9_RDATA_VLD) +#define F_DM3_CI9_RDATA_VLD V_DM3_CI9_RDATA_VLD(1U) + +#define S_CI10_ARB3_REQ 23 +#define V_CI10_ARB3_REQ(x) ((x) << S_CI10_ARB3_REQ) +#define F_CI10_ARB3_REQ V_CI10_ARB3_REQ(1U) + +#define S_ARB3_CI10_GNT 22 +#define V_ARB3_CI10_GNT(x) ((x) << S_ARB3_CI10_GNT) +#define F_ARB3_CI10_GNT V_ARB3_CI10_GNT(1U) + +#define S_CI10_DM3_WDATA_VLD 21 +#define V_CI10_DM3_WDATA_VLD(x) ((x) << S_CI10_DM3_WDATA_VLD) +#define F_CI10_DM3_WDATA_VLD V_CI10_DM3_WDATA_VLD(1U) + +#define S_DM3_CI10_RDATA_VLD 20 +#define V_DM3_CI10_RDATA_VLD(x) ((x) << S_DM3_CI10_RDATA_VLD) +#define F_DM3_CI10_RDATA_VLD V_DM3_CI10_RDATA_VLD(1U) + +#define S_CI11_ARB3_REQ 19 +#define V_CI11_ARB3_REQ(x) ((x) << S_CI11_ARB3_REQ) +#define F_CI11_ARB3_REQ V_CI11_ARB3_REQ(1U) + +#define S_ARB3_CI11_GNT 18 +#define V_ARB3_CI11_GNT(x) ((x) << S_ARB3_CI11_GNT) +#define F_ARB3_CI11_GNT V_ARB3_CI11_GNT(1U) + +#define S_CI11_DM3_WDATA_VLD 17 +#define V_CI11_DM3_WDATA_VLD(x) ((x) << S_CI11_DM3_WDATA_VLD) +#define F_CI11_DM3_WDATA_VLD V_CI11_DM3_WDATA_VLD(1U) + +#define S_DM3_CI11_RDATA_VLD 16 +#define V_DM3_CI11_RDATA_VLD(x) ((x) << S_DM3_CI11_RDATA_VLD) +#define F_DM3_CI11_RDATA_VLD V_DM3_CI11_RDATA_VLD(1U) + +#define S_CI12_ARB3_REQ 15 +#define V_CI12_ARB3_REQ(x) ((x) << S_CI12_ARB3_REQ) +#define F_CI12_ARB3_REQ V_CI12_ARB3_REQ(1U) + +#define S_ARB3_CI12_GNT 14 +#define V_ARB3_CI12_GNT(x) ((x) << S_ARB3_CI12_GNT) +#define F_ARB3_CI12_GNT V_ARB3_CI12_GNT(1U) + +#define S_CI12_DM3_WDATA_VLD 13 +#define V_CI12_DM3_WDATA_VLD(x) ((x) << S_CI12_DM3_WDATA_VLD) +#define F_CI12_DM3_WDATA_VLD V_CI12_DM3_WDATA_VLD(1U) + +#define S_DM3_CI12_RDATA_VLD 12 +#define V_DM3_CI12_RDATA_VLD(x) ((x) << S_DM3_CI12_RDATA_VLD) +#define F_DM3_CI12_RDATA_VLD 
V_DM3_CI12_RDATA_VLD(1U) + +#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG0 0xa400 + +#define S_CMD_IN_FIFO_CNT0 30 +#define M_CMD_IN_FIFO_CNT0 0x3U +#define V_CMD_IN_FIFO_CNT0(x) ((x) << S_CMD_IN_FIFO_CNT0) +#define G_CMD_IN_FIFO_CNT0(x) (((x) >> S_CMD_IN_FIFO_CNT0) & M_CMD_IN_FIFO_CNT0) + +#define S_CMD_SPLIT_FIFO_CNT0 28 +#define M_CMD_SPLIT_FIFO_CNT0 0x3U +#define V_CMD_SPLIT_FIFO_CNT0(x) ((x) << S_CMD_SPLIT_FIFO_CNT0) +#define G_CMD_SPLIT_FIFO_CNT0(x) (((x) >> S_CMD_SPLIT_FIFO_CNT0) & M_CMD_SPLIT_FIFO_CNT0) + +#define S_CMD_THROTTLE_FIFO_CNT0 22 +#define M_CMD_THROTTLE_FIFO_CNT0 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT0(x) ((x) << S_CMD_THROTTLE_FIFO_CNT0) +#define G_CMD_THROTTLE_FIFO_CNT0(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT0) & M_CMD_THROTTLE_FIFO_CNT0) + +#define S_RD_CHNL_FIFO_CNT0 15 +#define M_RD_CHNL_FIFO_CNT0 0x7fU +#define V_RD_CHNL_FIFO_CNT0(x) ((x) << S_RD_CHNL_FIFO_CNT0) +#define G_RD_CHNL_FIFO_CNT0(x) (((x) >> S_RD_CHNL_FIFO_CNT0) & M_RD_CHNL_FIFO_CNT0) + +#define S_RD_DATA_EXT_FIFO_CNT0 13 +#define M_RD_DATA_EXT_FIFO_CNT0 0x3U +#define V_RD_DATA_EXT_FIFO_CNT0(x) ((x) << S_RD_DATA_EXT_FIFO_CNT0) +#define G_RD_DATA_EXT_FIFO_CNT0(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT0) & M_RD_DATA_EXT_FIFO_CNT0) + +#define S_RD_DATA_512B_FIFO_CNT0 5 +#define M_RD_DATA_512B_FIFO_CNT0 0xffU +#define V_RD_DATA_512B_FIFO_CNT0(x) ((x) << S_RD_DATA_512B_FIFO_CNT0) +#define G_RD_DATA_512B_FIFO_CNT0(x) (((x) >> S_RD_DATA_512B_FIFO_CNT0) & M_RD_DATA_512B_FIFO_CNT0) + +#define S_RD_REQ_TAG_FIFO_CNT0 1 +#define M_RD_REQ_TAG_FIFO_CNT0 0xfU +#define V_RD_REQ_TAG_FIFO_CNT0(x) ((x) << S_RD_REQ_TAG_FIFO_CNT0) +#define G_RD_REQ_TAG_FIFO_CNT0(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT0) & M_RD_REQ_TAG_FIFO_CNT0) + +#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG0 0xa401 + +#define S_CMD_IN_FIFO_CNT1 30 +#define M_CMD_IN_FIFO_CNT1 0x3U +#define V_CMD_IN_FIFO_CNT1(x) ((x) << S_CMD_IN_FIFO_CNT1) +#define G_CMD_IN_FIFO_CNT1(x) (((x) >> S_CMD_IN_FIFO_CNT1) & M_CMD_IN_FIFO_CNT1) + 
+#define S_CMD_SPLIT_FIFO_CNT1 28 +#define M_CMD_SPLIT_FIFO_CNT1 0x3U +#define V_CMD_SPLIT_FIFO_CNT1(x) ((x) << S_CMD_SPLIT_FIFO_CNT1) +#define G_CMD_SPLIT_FIFO_CNT1(x) (((x) >> S_CMD_SPLIT_FIFO_CNT1) & M_CMD_SPLIT_FIFO_CNT1) + +#define S_CMD_THROTTLE_FIFO_CNT1 22 +#define M_CMD_THROTTLE_FIFO_CNT1 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT1(x) ((x) << S_CMD_THROTTLE_FIFO_CNT1) +#define G_CMD_THROTTLE_FIFO_CNT1(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT1) & M_CMD_THROTTLE_FIFO_CNT1) + +#define S_RD_CHNL_FIFO_CNT1 15 +#define M_RD_CHNL_FIFO_CNT1 0x7fU +#define V_RD_CHNL_FIFO_CNT1(x) ((x) << S_RD_CHNL_FIFO_CNT1) +#define G_RD_CHNL_FIFO_CNT1(x) (((x) >> S_RD_CHNL_FIFO_CNT1) & M_RD_CHNL_FIFO_CNT1) + +#define S_RD_DATA_EXT_FIFO_CNT1 13 +#define M_RD_DATA_EXT_FIFO_CNT1 0x3U +#define V_RD_DATA_EXT_FIFO_CNT1(x) ((x) << S_RD_DATA_EXT_FIFO_CNT1) +#define G_RD_DATA_EXT_FIFO_CNT1(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT1) & M_RD_DATA_EXT_FIFO_CNT1) + +#define S_RD_DATA_512B_FIFO_CNT1 5 +#define M_RD_DATA_512B_FIFO_CNT1 0xffU +#define V_RD_DATA_512B_FIFO_CNT1(x) ((x) << S_RD_DATA_512B_FIFO_CNT1) +#define G_RD_DATA_512B_FIFO_CNT1(x) (((x) >> S_RD_DATA_512B_FIFO_CNT1) & M_RD_DATA_512B_FIFO_CNT1) + +#define S_RD_REQ_TAG_FIFO_CNT1 1 +#define M_RD_REQ_TAG_FIFO_CNT1 0xfU +#define V_RD_REQ_TAG_FIFO_CNT1(x) ((x) << S_RD_REQ_TAG_FIFO_CNT1) +#define G_RD_REQ_TAG_FIFO_CNT1(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT1) & M_RD_REQ_TAG_FIFO_CNT1) + +#define A_MA_ULP_TX_CLIENT_INTERFACE_INTERNAL_REG0 0xa402 + +#define S_CMD_IN_FIFO_CNT2 30 +#define M_CMD_IN_FIFO_CNT2 0x3U +#define V_CMD_IN_FIFO_CNT2(x) ((x) << S_CMD_IN_FIFO_CNT2) +#define G_CMD_IN_FIFO_CNT2(x) (((x) >> S_CMD_IN_FIFO_CNT2) & M_CMD_IN_FIFO_CNT2) + +#define S_CMD_SPLIT_FIFO_CNT2 28 +#define M_CMD_SPLIT_FIFO_CNT2 0x3U +#define V_CMD_SPLIT_FIFO_CNT2(x) ((x) << S_CMD_SPLIT_FIFO_CNT2) +#define G_CMD_SPLIT_FIFO_CNT2(x) (((x) >> S_CMD_SPLIT_FIFO_CNT2) & M_CMD_SPLIT_FIFO_CNT2) + +#define S_CMD_THROTTLE_FIFO_CNT2 22 +#define M_CMD_THROTTLE_FIFO_CNT2 0x3fU +#define 
V_CMD_THROTTLE_FIFO_CNT2(x) ((x) << S_CMD_THROTTLE_FIFO_CNT2) +#define G_CMD_THROTTLE_FIFO_CNT2(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT2) & M_CMD_THROTTLE_FIFO_CNT2) + +#define S_RD_CHNL_FIFO_CNT2 15 +#define M_RD_CHNL_FIFO_CNT2 0x7fU +#define V_RD_CHNL_FIFO_CNT2(x) ((x) << S_RD_CHNL_FIFO_CNT2) +#define G_RD_CHNL_FIFO_CNT2(x) (((x) >> S_RD_CHNL_FIFO_CNT2) & M_RD_CHNL_FIFO_CNT2) + +#define S_RD_DATA_EXT_FIFO_CNT2 13 +#define M_RD_DATA_EXT_FIFO_CNT2 0x3U +#define V_RD_DATA_EXT_FIFO_CNT2(x) ((x) << S_RD_DATA_EXT_FIFO_CNT2) +#define G_RD_DATA_EXT_FIFO_CNT2(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT2) & M_RD_DATA_EXT_FIFO_CNT2) + +#define S_RD_DATA_512B_FIFO_CNT2 5 +#define M_RD_DATA_512B_FIFO_CNT2 0xffU +#define V_RD_DATA_512B_FIFO_CNT2(x) ((x) << S_RD_DATA_512B_FIFO_CNT2) +#define G_RD_DATA_512B_FIFO_CNT2(x) (((x) >> S_RD_DATA_512B_FIFO_CNT2) & M_RD_DATA_512B_FIFO_CNT2) + +#define S_RD_REQ_TAG_FIFO_CNT2 1 +#define M_RD_REQ_TAG_FIFO_CNT2 0xfU +#define V_RD_REQ_TAG_FIFO_CNT2(x) ((x) << S_RD_REQ_TAG_FIFO_CNT2) +#define G_RD_REQ_TAG_FIFO_CNT2(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT2) & M_RD_REQ_TAG_FIFO_CNT2) + +#define A_MA_ULP_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa403 + +#define S_CMD_IN_FIFO_CNT3 30 +#define M_CMD_IN_FIFO_CNT3 0x3U +#define V_CMD_IN_FIFO_CNT3(x) ((x) << S_CMD_IN_FIFO_CNT3) +#define G_CMD_IN_FIFO_CNT3(x) (((x) >> S_CMD_IN_FIFO_CNT3) & M_CMD_IN_FIFO_CNT3) + +#define S_CMD_SPLIT_FIFO_CNT3 28 +#define M_CMD_SPLIT_FIFO_CNT3 0x3U +#define V_CMD_SPLIT_FIFO_CNT3(x) ((x) << S_CMD_SPLIT_FIFO_CNT3) +#define G_CMD_SPLIT_FIFO_CNT3(x) (((x) >> S_CMD_SPLIT_FIFO_CNT3) & M_CMD_SPLIT_FIFO_CNT3) + +#define S_CMD_THROTTLE_FIFO_CNT3 22 +#define M_CMD_THROTTLE_FIFO_CNT3 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT3(x) ((x) << S_CMD_THROTTLE_FIFO_CNT3) +#define G_CMD_THROTTLE_FIFO_CNT3(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT3) & M_CMD_THROTTLE_FIFO_CNT3) + +#define S_RD_CHNL_FIFO_CNT3 15 +#define M_RD_CHNL_FIFO_CNT3 0x7fU +#define V_RD_CHNL_FIFO_CNT3(x) ((x) << S_RD_CHNL_FIFO_CNT3) +#define 
G_RD_CHNL_FIFO_CNT3(x) (((x) >> S_RD_CHNL_FIFO_CNT3) & M_RD_CHNL_FIFO_CNT3) + +#define S_RD_DATA_EXT_FIFO_CNT3 13 +#define M_RD_DATA_EXT_FIFO_CNT3 0x3U +#define V_RD_DATA_EXT_FIFO_CNT3(x) ((x) << S_RD_DATA_EXT_FIFO_CNT3) +#define G_RD_DATA_EXT_FIFO_CNT3(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT3) & M_RD_DATA_EXT_FIFO_CNT3) + +#define S_RD_DATA_512B_FIFO_CNT3 5 +#define M_RD_DATA_512B_FIFO_CNT3 0xffU +#define V_RD_DATA_512B_FIFO_CNT3(x) ((x) << S_RD_DATA_512B_FIFO_CNT3) +#define G_RD_DATA_512B_FIFO_CNT3(x) (((x) >> S_RD_DATA_512B_FIFO_CNT3) & M_RD_DATA_512B_FIFO_CNT3) + +#define S_RD_REQ_TAG_FIFO_CNT3 1 +#define M_RD_REQ_TAG_FIFO_CNT3 0xfU +#define V_RD_REQ_TAG_FIFO_CNT3(x) ((x) << S_RD_REQ_TAG_FIFO_CNT3) +#define G_RD_REQ_TAG_FIFO_CNT3(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT3) & M_RD_REQ_TAG_FIFO_CNT3) + +#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa404 + +#define S_CMD_IN_FIFO_CNT4 30 +#define M_CMD_IN_FIFO_CNT4 0x3U +#define V_CMD_IN_FIFO_CNT4(x) ((x) << S_CMD_IN_FIFO_CNT4) +#define G_CMD_IN_FIFO_CNT4(x) (((x) >> S_CMD_IN_FIFO_CNT4) & M_CMD_IN_FIFO_CNT4) + +#define S_CMD_SPLIT_FIFO_CNT4 28 +#define M_CMD_SPLIT_FIFO_CNT4 0x3U +#define V_CMD_SPLIT_FIFO_CNT4(x) ((x) << S_CMD_SPLIT_FIFO_CNT4) +#define G_CMD_SPLIT_FIFO_CNT4(x) (((x) >> S_CMD_SPLIT_FIFO_CNT4) & M_CMD_SPLIT_FIFO_CNT4) + +#define S_CMD_THROTTLE_FIFO_CNT4 22 +#define M_CMD_THROTTLE_FIFO_CNT4 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT4(x) ((x) << S_CMD_THROTTLE_FIFO_CNT4) +#define G_CMD_THROTTLE_FIFO_CNT4(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT4) & M_CMD_THROTTLE_FIFO_CNT4) + +#define S_RD_CHNL_FIFO_CNT4 15 +#define M_RD_CHNL_FIFO_CNT4 0x7fU +#define V_RD_CHNL_FIFO_CNT4(x) ((x) << S_RD_CHNL_FIFO_CNT4) +#define G_RD_CHNL_FIFO_CNT4(x) (((x) >> S_RD_CHNL_FIFO_CNT4) & M_RD_CHNL_FIFO_CNT4) + +#define S_RD_DATA_EXT_FIFO_CNT4 13 +#define M_RD_DATA_EXT_FIFO_CNT4 0x3U +#define V_RD_DATA_EXT_FIFO_CNT4(x) ((x) << S_RD_DATA_EXT_FIFO_CNT4) +#define G_RD_DATA_EXT_FIFO_CNT4(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT4) & 
M_RD_DATA_EXT_FIFO_CNT4) + +#define S_RD_DATA_512B_FIFO_CNT4 5 +#define M_RD_DATA_512B_FIFO_CNT4 0xffU +#define V_RD_DATA_512B_FIFO_CNT4(x) ((x) << S_RD_DATA_512B_FIFO_CNT4) +#define G_RD_DATA_512B_FIFO_CNT4(x) (((x) >> S_RD_DATA_512B_FIFO_CNT4) & M_RD_DATA_512B_FIFO_CNT4) + +#define S_RD_REQ_TAG_FIFO_CNT4 1 +#define M_RD_REQ_TAG_FIFO_CNT4 0xfU +#define V_RD_REQ_TAG_FIFO_CNT4(x) ((x) << S_RD_REQ_TAG_FIFO_CNT4) +#define G_RD_REQ_TAG_FIFO_CNT4(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT4) & M_RD_REQ_TAG_FIFO_CNT4) + +#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG0 0xa405 + +#define S_CMD_IN_FIFO_CNT5 30 +#define M_CMD_IN_FIFO_CNT5 0x3U +#define V_CMD_IN_FIFO_CNT5(x) ((x) << S_CMD_IN_FIFO_CNT5) +#define G_CMD_IN_FIFO_CNT5(x) (((x) >> S_CMD_IN_FIFO_CNT5) & M_CMD_IN_FIFO_CNT5) + +#define S_CMD_SPLIT_FIFO_CNT5 28 +#define M_CMD_SPLIT_FIFO_CNT5 0x3U +#define V_CMD_SPLIT_FIFO_CNT5(x) ((x) << S_CMD_SPLIT_FIFO_CNT5) +#define G_CMD_SPLIT_FIFO_CNT5(x) (((x) >> S_CMD_SPLIT_FIFO_CNT5) & M_CMD_SPLIT_FIFO_CNT5) + +#define S_CMD_THROTTLE_FIFO_CNT5 22 +#define M_CMD_THROTTLE_FIFO_CNT5 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT5(x) ((x) << S_CMD_THROTTLE_FIFO_CNT5) +#define G_CMD_THROTTLE_FIFO_CNT5(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT5) & M_CMD_THROTTLE_FIFO_CNT5) + +#define S_RD_CHNL_FIFO_CNT5 15 +#define M_RD_CHNL_FIFO_CNT5 0x7fU +#define V_RD_CHNL_FIFO_CNT5(x) ((x) << S_RD_CHNL_FIFO_CNT5) +#define G_RD_CHNL_FIFO_CNT5(x) (((x) >> S_RD_CHNL_FIFO_CNT5) & M_RD_CHNL_FIFO_CNT5) + +#define S_RD_DATA_EXT_FIFO_CNT5 13 +#define M_RD_DATA_EXT_FIFO_CNT5 0x3U +#define V_RD_DATA_EXT_FIFO_CNT5(x) ((x) << S_RD_DATA_EXT_FIFO_CNT5) +#define G_RD_DATA_EXT_FIFO_CNT5(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT5) & M_RD_DATA_EXT_FIFO_CNT5) + +#define S_RD_DATA_512B_FIFO_CNT5 5 +#define M_RD_DATA_512B_FIFO_CNT5 0xffU +#define V_RD_DATA_512B_FIFO_CNT5(x) ((x) << S_RD_DATA_512B_FIFO_CNT5) +#define G_RD_DATA_512B_FIFO_CNT5(x) (((x) >> S_RD_DATA_512B_FIFO_CNT5) & M_RD_DATA_512B_FIFO_CNT5) + +#define S_RD_REQ_TAG_FIFO_CNT5 
1 +#define M_RD_REQ_TAG_FIFO_CNT5 0xfU +#define V_RD_REQ_TAG_FIFO_CNT5(x) ((x) << S_RD_REQ_TAG_FIFO_CNT5) +#define G_RD_REQ_TAG_FIFO_CNT5(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT5) & M_RD_REQ_TAG_FIFO_CNT5) + +#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG0 0xa406 + +#define S_CMD_IN_FIFO_CNT6 30 +#define M_CMD_IN_FIFO_CNT6 0x3U +#define V_CMD_IN_FIFO_CNT6(x) ((x) << S_CMD_IN_FIFO_CNT6) +#define G_CMD_IN_FIFO_CNT6(x) (((x) >> S_CMD_IN_FIFO_CNT6) & M_CMD_IN_FIFO_CNT6) + +#define S_CMD_SPLIT_FIFO_CNT6 28 +#define M_CMD_SPLIT_FIFO_CNT6 0x3U +#define V_CMD_SPLIT_FIFO_CNT6(x) ((x) << S_CMD_SPLIT_FIFO_CNT6) +#define G_CMD_SPLIT_FIFO_CNT6(x) (((x) >> S_CMD_SPLIT_FIFO_CNT6) & M_CMD_SPLIT_FIFO_CNT6) + +#define S_CMD_THROTTLE_FIFO_CNT6 22 +#define M_CMD_THROTTLE_FIFO_CNT6 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT6(x) ((x) << S_CMD_THROTTLE_FIFO_CNT6) +#define G_CMD_THROTTLE_FIFO_CNT6(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT6) & M_CMD_THROTTLE_FIFO_CNT6) + +#define S_RD_CHNL_FIFO_CNT6 15 +#define M_RD_CHNL_FIFO_CNT6 0x7fU +#define V_RD_CHNL_FIFO_CNT6(x) ((x) << S_RD_CHNL_FIFO_CNT6) +#define G_RD_CHNL_FIFO_CNT6(x) (((x) >> S_RD_CHNL_FIFO_CNT6) & M_RD_CHNL_FIFO_CNT6) + +#define S_RD_DATA_EXT_FIFO_CNT6 13 +#define M_RD_DATA_EXT_FIFO_CNT6 0x3U +#define V_RD_DATA_EXT_FIFO_CNT6(x) ((x) << S_RD_DATA_EXT_FIFO_CNT6) +#define G_RD_DATA_EXT_FIFO_CNT6(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT6) & M_RD_DATA_EXT_FIFO_CNT6) + +#define S_RD_DATA_512B_FIFO_CNT6 5 +#define M_RD_DATA_512B_FIFO_CNT6 0xffU +#define V_RD_DATA_512B_FIFO_CNT6(x) ((x) << S_RD_DATA_512B_FIFO_CNT6) +#define G_RD_DATA_512B_FIFO_CNT6(x) (((x) >> S_RD_DATA_512B_FIFO_CNT6) & M_RD_DATA_512B_FIFO_CNT6) + +#define S_RD_REQ_TAG_FIFO_CNT6 1 +#define M_RD_REQ_TAG_FIFO_CNT6 0xfU +#define V_RD_REQ_TAG_FIFO_CNT6(x) ((x) << S_RD_REQ_TAG_FIFO_CNT6) +#define G_RD_REQ_TAG_FIFO_CNT6(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT6) & M_RD_REQ_TAG_FIFO_CNT6) + +#define A_MA_LE_CLIENT_INTERFACE_INTERNAL_REG0 0xa407 + +#define S_CMD_IN_FIFO_CNT7 30 +#define 
M_CMD_IN_FIFO_CNT7 0x3U +#define V_CMD_IN_FIFO_CNT7(x) ((x) << S_CMD_IN_FIFO_CNT7) +#define G_CMD_IN_FIFO_CNT7(x) (((x) >> S_CMD_IN_FIFO_CNT7) & M_CMD_IN_FIFO_CNT7) + +#define S_CMD_SPLIT_FIFO_CNT7 28 +#define M_CMD_SPLIT_FIFO_CNT7 0x3U +#define V_CMD_SPLIT_FIFO_CNT7(x) ((x) << S_CMD_SPLIT_FIFO_CNT7) +#define G_CMD_SPLIT_FIFO_CNT7(x) (((x) >> S_CMD_SPLIT_FIFO_CNT7) & M_CMD_SPLIT_FIFO_CNT7) + +#define S_CMD_THROTTLE_FIFO_CNT7 22 +#define M_CMD_THROTTLE_FIFO_CNT7 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT7(x) ((x) << S_CMD_THROTTLE_FIFO_CNT7) +#define G_CMD_THROTTLE_FIFO_CNT7(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT7) & M_CMD_THROTTLE_FIFO_CNT7) + +#define S_RD_CHNL_FIFO_CNT7 15 +#define M_RD_CHNL_FIFO_CNT7 0x7fU +#define V_RD_CHNL_FIFO_CNT7(x) ((x) << S_RD_CHNL_FIFO_CNT7) +#define G_RD_CHNL_FIFO_CNT7(x) (((x) >> S_RD_CHNL_FIFO_CNT7) & M_RD_CHNL_FIFO_CNT7) + +#define S_RD_DATA_EXT_FIFO_CNT7 13 +#define M_RD_DATA_EXT_FIFO_CNT7 0x3U +#define V_RD_DATA_EXT_FIFO_CNT7(x) ((x) << S_RD_DATA_EXT_FIFO_CNT7) +#define G_RD_DATA_EXT_FIFO_CNT7(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT7) & M_RD_DATA_EXT_FIFO_CNT7) + +#define S_RD_DATA_512B_FIFO_CNT7 5 +#define M_RD_DATA_512B_FIFO_CNT7 0xffU +#define V_RD_DATA_512B_FIFO_CNT7(x) ((x) << S_RD_DATA_512B_FIFO_CNT7) +#define G_RD_DATA_512B_FIFO_CNT7(x) (((x) >> S_RD_DATA_512B_FIFO_CNT7) & M_RD_DATA_512B_FIFO_CNT7) + +#define S_RD_REQ_TAG_FIFO_CNT7 1 +#define M_RD_REQ_TAG_FIFO_CNT7 0xfU +#define V_RD_REQ_TAG_FIFO_CNT7(x) ((x) << S_RD_REQ_TAG_FIFO_CNT7) +#define G_RD_REQ_TAG_FIFO_CNT7(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT7) & M_RD_REQ_TAG_FIFO_CNT7) + +#define A_MA_CIM_CLIENT_INTERFACE_INTERNAL_REG0 0xa408 + +#define S_CMD_IN_FIFO_CNT8 30 +#define M_CMD_IN_FIFO_CNT8 0x3U +#define V_CMD_IN_FIFO_CNT8(x) ((x) << S_CMD_IN_FIFO_CNT8) +#define G_CMD_IN_FIFO_CNT8(x) (((x) >> S_CMD_IN_FIFO_CNT8) & M_CMD_IN_FIFO_CNT8) + +#define S_CMD_SPLIT_FIFO_CNT8 28 +#define M_CMD_SPLIT_FIFO_CNT8 0x3U +#define V_CMD_SPLIT_FIFO_CNT8(x) ((x) << S_CMD_SPLIT_FIFO_CNT8) +#define 
G_CMD_SPLIT_FIFO_CNT8(x) (((x) >> S_CMD_SPLIT_FIFO_CNT8) & M_CMD_SPLIT_FIFO_CNT8) + +#define S_CMD_THROTTLE_FIFO_CNT8 22 +#define M_CMD_THROTTLE_FIFO_CNT8 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT8(x) ((x) << S_CMD_THROTTLE_FIFO_CNT8) +#define G_CMD_THROTTLE_FIFO_CNT8(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT8) & M_CMD_THROTTLE_FIFO_CNT8) + +#define S_RD_CHNL_FIFO_CNT8 15 +#define M_RD_CHNL_FIFO_CNT8 0x7fU +#define V_RD_CHNL_FIFO_CNT8(x) ((x) << S_RD_CHNL_FIFO_CNT8) +#define G_RD_CHNL_FIFO_CNT8(x) (((x) >> S_RD_CHNL_FIFO_CNT8) & M_RD_CHNL_FIFO_CNT8) + +#define S_RD_DATA_EXT_FIFO_CNT8 13 +#define M_RD_DATA_EXT_FIFO_CNT8 0x3U +#define V_RD_DATA_EXT_FIFO_CNT8(x) ((x) << S_RD_DATA_EXT_FIFO_CNT8) +#define G_RD_DATA_EXT_FIFO_CNT8(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT8) & M_RD_DATA_EXT_FIFO_CNT8) + +#define S_RD_DATA_512B_FIFO_CNT8 5 +#define M_RD_DATA_512B_FIFO_CNT8 0xffU +#define V_RD_DATA_512B_FIFO_CNT8(x) ((x) << S_RD_DATA_512B_FIFO_CNT8) +#define G_RD_DATA_512B_FIFO_CNT8(x) (((x) >> S_RD_DATA_512B_FIFO_CNT8) & M_RD_DATA_512B_FIFO_CNT8) + +#define S_RD_REQ_TAG_FIFO_CNT8 1 +#define M_RD_REQ_TAG_FIFO_CNT8 0xfU +#define V_RD_REQ_TAG_FIFO_CNT8(x) ((x) << S_RD_REQ_TAG_FIFO_CNT8) +#define G_RD_REQ_TAG_FIFO_CNT8(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT8) & M_RD_REQ_TAG_FIFO_CNT8) + +#define A_MA_PCIE_CLIENT_INTERFACE_INTERNAL_REG0 0xa409 + +#define S_CMD_IN_FIFO_CNT9 30 +#define M_CMD_IN_FIFO_CNT9 0x3U +#define V_CMD_IN_FIFO_CNT9(x) ((x) << S_CMD_IN_FIFO_CNT9) +#define G_CMD_IN_FIFO_CNT9(x) (((x) >> S_CMD_IN_FIFO_CNT9) & M_CMD_IN_FIFO_CNT9) + +#define S_CMD_SPLIT_FIFO_CNT9 28 +#define M_CMD_SPLIT_FIFO_CNT9 0x3U +#define V_CMD_SPLIT_FIFO_CNT9(x) ((x) << S_CMD_SPLIT_FIFO_CNT9) +#define G_CMD_SPLIT_FIFO_CNT9(x) (((x) >> S_CMD_SPLIT_FIFO_CNT9) & M_CMD_SPLIT_FIFO_CNT9) + +#define S_CMD_THROTTLE_FIFO_CNT9 22 +#define M_CMD_THROTTLE_FIFO_CNT9 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT9(x) ((x) << S_CMD_THROTTLE_FIFO_CNT9) +#define G_CMD_THROTTLE_FIFO_CNT9(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT9) & 
M_CMD_THROTTLE_FIFO_CNT9) + +#define S_RD_CHNL_FIFO_CNT9 15 +#define M_RD_CHNL_FIFO_CNT9 0x7fU +#define V_RD_CHNL_FIFO_CNT9(x) ((x) << S_RD_CHNL_FIFO_CNT9) +#define G_RD_CHNL_FIFO_CNT9(x) (((x) >> S_RD_CHNL_FIFO_CNT9) & M_RD_CHNL_FIFO_CNT9) + +#define S_RD_DATA_EXT_FIFO_CNT9 13 +#define M_RD_DATA_EXT_FIFO_CNT9 0x3U +#define V_RD_DATA_EXT_FIFO_CNT9(x) ((x) << S_RD_DATA_EXT_FIFO_CNT9) +#define G_RD_DATA_EXT_FIFO_CNT9(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT9) & M_RD_DATA_EXT_FIFO_CNT9) + +#define S_RD_DATA_512B_FIFO_CNT9 5 +#define M_RD_DATA_512B_FIFO_CNT9 0xffU +#define V_RD_DATA_512B_FIFO_CNT9(x) ((x) << S_RD_DATA_512B_FIFO_CNT9) +#define G_RD_DATA_512B_FIFO_CNT9(x) (((x) >> S_RD_DATA_512B_FIFO_CNT9) & M_RD_DATA_512B_FIFO_CNT9) + +#define S_RD_REQ_TAG_FIFO_CNT9 1 +#define M_RD_REQ_TAG_FIFO_CNT9 0xfU +#define V_RD_REQ_TAG_FIFO_CNT9(x) ((x) << S_RD_REQ_TAG_FIFO_CNT9) +#define G_RD_REQ_TAG_FIFO_CNT9(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT9) & M_RD_REQ_TAG_FIFO_CNT9) + +#define A_MA_PM_TX_CLIENT_INTERFACE_INTERNAL_REG0 0xa40a + +#define S_CMD_IN_FIFO_CNT10 30 +#define M_CMD_IN_FIFO_CNT10 0x3U +#define V_CMD_IN_FIFO_CNT10(x) ((x) << S_CMD_IN_FIFO_CNT10) +#define G_CMD_IN_FIFO_CNT10(x) (((x) >> S_CMD_IN_FIFO_CNT10) & M_CMD_IN_FIFO_CNT10) + +#define S_CMD_SPLIT_FIFO_CNT10 28 +#define M_CMD_SPLIT_FIFO_CNT10 0x3U +#define V_CMD_SPLIT_FIFO_CNT10(x) ((x) << S_CMD_SPLIT_FIFO_CNT10) +#define G_CMD_SPLIT_FIFO_CNT10(x) (((x) >> S_CMD_SPLIT_FIFO_CNT10) & M_CMD_SPLIT_FIFO_CNT10) + +#define S_CMD_THROTTLE_FIFO_CNT10 22 +#define M_CMD_THROTTLE_FIFO_CNT10 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT10(x) ((x) << S_CMD_THROTTLE_FIFO_CNT10) +#define G_CMD_THROTTLE_FIFO_CNT10(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT10) & M_CMD_THROTTLE_FIFO_CNT10) + +#define S_RD_CHNL_FIFO_CNT10 15 +#define M_RD_CHNL_FIFO_CNT10 0x7fU +#define V_RD_CHNL_FIFO_CNT10(x) ((x) << S_RD_CHNL_FIFO_CNT10) +#define G_RD_CHNL_FIFO_CNT10(x) (((x) >> S_RD_CHNL_FIFO_CNT10) & M_RD_CHNL_FIFO_CNT10) + +#define S_RD_DATA_EXT_FIFO_CNT10 13 
+#define M_RD_DATA_EXT_FIFO_CNT10 0x3U +#define V_RD_DATA_EXT_FIFO_CNT10(x) ((x) << S_RD_DATA_EXT_FIFO_CNT10) +#define G_RD_DATA_EXT_FIFO_CNT10(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT10) & M_RD_DATA_EXT_FIFO_CNT10) + +#define S_RD_DATA_512B_FIFO_CNT10 5 +#define M_RD_DATA_512B_FIFO_CNT10 0xffU +#define V_RD_DATA_512B_FIFO_CNT10(x) ((x) << S_RD_DATA_512B_FIFO_CNT10) +#define G_RD_DATA_512B_FIFO_CNT10(x) (((x) >> S_RD_DATA_512B_FIFO_CNT10) & M_RD_DATA_512B_FIFO_CNT10) + +#define S_RD_REQ_TAG_FIFO_CNT10 1 +#define M_RD_REQ_TAG_FIFO_CNT10 0xfU +#define V_RD_REQ_TAG_FIFO_CNT10(x) ((x) << S_RD_REQ_TAG_FIFO_CNT10) +#define G_RD_REQ_TAG_FIFO_CNT10(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT10) & M_RD_REQ_TAG_FIFO_CNT10) + +#define A_MA_PM_RX_CLIENT_INTERFACE_INTERNAL_REG0 0xa40b + +#define S_CMD_IN_FIFO_CNT11 30 +#define M_CMD_IN_FIFO_CNT11 0x3U +#define V_CMD_IN_FIFO_CNT11(x) ((x) << S_CMD_IN_FIFO_CNT11) +#define G_CMD_IN_FIFO_CNT11(x) (((x) >> S_CMD_IN_FIFO_CNT11) & M_CMD_IN_FIFO_CNT11) + +#define S_CMD_SPLIT_FIFO_CNT11 28 +#define M_CMD_SPLIT_FIFO_CNT11 0x3U +#define V_CMD_SPLIT_FIFO_CNT11(x) ((x) << S_CMD_SPLIT_FIFO_CNT11) +#define G_CMD_SPLIT_FIFO_CNT11(x) (((x) >> S_CMD_SPLIT_FIFO_CNT11) & M_CMD_SPLIT_FIFO_CNT11) + +#define S_CMD_THROTTLE_FIFO_CNT11 22 +#define M_CMD_THROTTLE_FIFO_CNT11 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT11(x) ((x) << S_CMD_THROTTLE_FIFO_CNT11) +#define G_CMD_THROTTLE_FIFO_CNT11(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT11) & M_CMD_THROTTLE_FIFO_CNT11) + +#define S_RD_CHNL_FIFO_CNT11 15 +#define M_RD_CHNL_FIFO_CNT11 0x7fU +#define V_RD_CHNL_FIFO_CNT11(x) ((x) << S_RD_CHNL_FIFO_CNT11) +#define G_RD_CHNL_FIFO_CNT11(x) (((x) >> S_RD_CHNL_FIFO_CNT11) & M_RD_CHNL_FIFO_CNT11) + +#define S_RD_DATA_EXT_FIFO_CNT11 13 +#define M_RD_DATA_EXT_FIFO_CNT11 0x3U +#define V_RD_DATA_EXT_FIFO_CNT11(x) ((x) << S_RD_DATA_EXT_FIFO_CNT11) +#define G_RD_DATA_EXT_FIFO_CNT11(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT11) & M_RD_DATA_EXT_FIFO_CNT11) + +#define S_RD_DATA_512B_FIFO_CNT11 5 +#define 
M_RD_DATA_512B_FIFO_CNT11 0xffU +#define V_RD_DATA_512B_FIFO_CNT11(x) ((x) << S_RD_DATA_512B_FIFO_CNT11) +#define G_RD_DATA_512B_FIFO_CNT11(x) (((x) >> S_RD_DATA_512B_FIFO_CNT11) & M_RD_DATA_512B_FIFO_CNT11) + +#define S_RD_REQ_TAG_FIFO_CNT11 1 +#define M_RD_REQ_TAG_FIFO_CNT11 0xfU +#define V_RD_REQ_TAG_FIFO_CNT11(x) ((x) << S_RD_REQ_TAG_FIFO_CNT11) +#define G_RD_REQ_TAG_FIFO_CNT11(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT11) & M_RD_REQ_TAG_FIFO_CNT11) + +#define A_MA_HMA_CLIENT_INTERFACE_INTERNAL_REG0 0xa40c + +#define S_CMD_IN_FIFO_CNT12 30 +#define M_CMD_IN_FIFO_CNT12 0x3U +#define V_CMD_IN_FIFO_CNT12(x) ((x) << S_CMD_IN_FIFO_CNT12) +#define G_CMD_IN_FIFO_CNT12(x) (((x) >> S_CMD_IN_FIFO_CNT12) & M_CMD_IN_FIFO_CNT12) + +#define S_CMD_SPLIT_FIFO_CNT12 28 +#define M_CMD_SPLIT_FIFO_CNT12 0x3U +#define V_CMD_SPLIT_FIFO_CNT12(x) ((x) << S_CMD_SPLIT_FIFO_CNT12) +#define G_CMD_SPLIT_FIFO_CNT12(x) (((x) >> S_CMD_SPLIT_FIFO_CNT12) & M_CMD_SPLIT_FIFO_CNT12) + +#define S_CMD_THROTTLE_FIFO_CNT12 22 +#define M_CMD_THROTTLE_FIFO_CNT12 0x3fU +#define V_CMD_THROTTLE_FIFO_CNT12(x) ((x) << S_CMD_THROTTLE_FIFO_CNT12) +#define G_CMD_THROTTLE_FIFO_CNT12(x) (((x) >> S_CMD_THROTTLE_FIFO_CNT12) & M_CMD_THROTTLE_FIFO_CNT12) + +#define S_RD_CHNL_FIFO_CNT12 15 +#define M_RD_CHNL_FIFO_CNT12 0x7fU +#define V_RD_CHNL_FIFO_CNT12(x) ((x) << S_RD_CHNL_FIFO_CNT12) +#define G_RD_CHNL_FIFO_CNT12(x) (((x) >> S_RD_CHNL_FIFO_CNT12) & M_RD_CHNL_FIFO_CNT12) + +#define S_RD_DATA_EXT_FIFO_CNT12 13 +#define M_RD_DATA_EXT_FIFO_CNT12 0x3U +#define V_RD_DATA_EXT_FIFO_CNT12(x) ((x) << S_RD_DATA_EXT_FIFO_CNT12) +#define G_RD_DATA_EXT_FIFO_CNT12(x) (((x) >> S_RD_DATA_EXT_FIFO_CNT12) & M_RD_DATA_EXT_FIFO_CNT12) + +#define S_RD_DATA_512B_FIFO_CNT12 5 +#define M_RD_DATA_512B_FIFO_CNT12 0xffU +#define V_RD_DATA_512B_FIFO_CNT12(x) ((x) << S_RD_DATA_512B_FIFO_CNT12) +#define G_RD_DATA_512B_FIFO_CNT12(x) (((x) >> S_RD_DATA_512B_FIFO_CNT12) & M_RD_DATA_512B_FIFO_CNT12) + +#define S_RD_REQ_TAG_FIFO_CNT12 1 +#define 
M_RD_REQ_TAG_FIFO_CNT12 0xfU +#define V_RD_REQ_TAG_FIFO_CNT12(x) ((x) << S_RD_REQ_TAG_FIFO_CNT12) +#define G_RD_REQ_TAG_FIFO_CNT12(x) (((x) >> S_RD_REQ_TAG_FIFO_CNT12) & M_RD_REQ_TAG_FIFO_CNT12) + +#define A_MA_TARGET_0_ARBITER_INTERFACE_INTERNAL_REG0 0xa40d + +#define S_WR_DATA_FSM0 23 +#define V_WR_DATA_FSM0(x) ((x) << S_WR_DATA_FSM0) +#define F_WR_DATA_FSM0 V_WR_DATA_FSM0(1U) + +#define S_RD_DATA_FSM0 22 +#define V_RD_DATA_FSM0(x) ((x) << S_RD_DATA_FSM0) +#define F_RD_DATA_FSM0 V_RD_DATA_FSM0(1U) + +#define S_TGT_CMD_FIFO_CNT0 19 +#define M_TGT_CMD_FIFO_CNT0 0x7U +#define V_TGT_CMD_FIFO_CNT0(x) ((x) << S_TGT_CMD_FIFO_CNT0) +#define G_TGT_CMD_FIFO_CNT0(x) (((x) >> S_TGT_CMD_FIFO_CNT0) & M_TGT_CMD_FIFO_CNT0) + +#define S_CLNT_NUM_FIFO_CNT0 16 +#define M_CLNT_NUM_FIFO_CNT0 0x7U +#define V_CLNT_NUM_FIFO_CNT0(x) ((x) << S_CLNT_NUM_FIFO_CNT0) +#define G_CLNT_NUM_FIFO_CNT0(x) (((x) >> S_CLNT_NUM_FIFO_CNT0) & M_CLNT_NUM_FIFO_CNT0) + +#define S_WR_CMD_TAG_FIFO_CNT_TGT0 8 +#define M_WR_CMD_TAG_FIFO_CNT_TGT0 0xffU +#define V_WR_CMD_TAG_FIFO_CNT_TGT0(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT0) +#define G_WR_CMD_TAG_FIFO_CNT_TGT0(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT0) & M_WR_CMD_TAG_FIFO_CNT_TGT0) + +#define S_WR_DATA_512B_FIFO_CNT_TGT0 0 +#define M_WR_DATA_512B_FIFO_CNT_TGT0 0xffU +#define V_WR_DATA_512B_FIFO_CNT_TGT0(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT0) +#define G_WR_DATA_512B_FIFO_CNT_TGT0(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT0) & M_WR_DATA_512B_FIFO_CNT_TGT0) + +#define A_MA_TARGET_1_ARBITER_INTERFACE_INTERNAL_REG0 0xa40e + +#define S_WR_DATA_FSM1 23 +#define V_WR_DATA_FSM1(x) ((x) << S_WR_DATA_FSM1) +#define F_WR_DATA_FSM1 V_WR_DATA_FSM1(1U) + +#define S_RD_DATA_FSM1 22 +#define V_RD_DATA_FSM1(x) ((x) << S_RD_DATA_FSM1) +#define F_RD_DATA_FSM1 V_RD_DATA_FSM1(1U) + +#define S_TGT_CMD_FIFO_CNT1 19 +#define M_TGT_CMD_FIFO_CNT1 0x7U +#define V_TGT_CMD_FIFO_CNT1(x) ((x) << S_TGT_CMD_FIFO_CNT1) +#define G_TGT_CMD_FIFO_CNT1(x) (((x) >> S_TGT_CMD_FIFO_CNT1) & 
M_TGT_CMD_FIFO_CNT1) + +#define S_CLNT_NUM_FIFO_CNT1 16 +#define M_CLNT_NUM_FIFO_CNT1 0x7U +#define V_CLNT_NUM_FIFO_CNT1(x) ((x) << S_CLNT_NUM_FIFO_CNT1) +#define G_CLNT_NUM_FIFO_CNT1(x) (((x) >> S_CLNT_NUM_FIFO_CNT1) & M_CLNT_NUM_FIFO_CNT1) + +#define S_WR_CMD_TAG_FIFO_CNT_TGT1 8 +#define M_WR_CMD_TAG_FIFO_CNT_TGT1 0xffU +#define V_WR_CMD_TAG_FIFO_CNT_TGT1(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT1) +#define G_WR_CMD_TAG_FIFO_CNT_TGT1(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT1) & M_WR_CMD_TAG_FIFO_CNT_TGT1) + +#define S_WR_DATA_512B_FIFO_CNT_TGT1 0 +#define M_WR_DATA_512B_FIFO_CNT_TGT1 0xffU +#define V_WR_DATA_512B_FIFO_CNT_TGT1(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT1) +#define G_WR_DATA_512B_FIFO_CNT_TGT1(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT1) & M_WR_DATA_512B_FIFO_CNT_TGT1) + +#define A_MA_TARGET_2_ARBITER_INTERFACE_INTERNAL_REG0 0xa40f + +#define S_WR_DATA_FSM2 23 +#define V_WR_DATA_FSM2(x) ((x) << S_WR_DATA_FSM2) +#define F_WR_DATA_FSM2 V_WR_DATA_FSM2(1U) + +#define S_RD_DATA_FSM2 22 +#define V_RD_DATA_FSM2(x) ((x) << S_RD_DATA_FSM2) +#define F_RD_DATA_FSM2 V_RD_DATA_FSM2(1U) + +#define S_TGT_CMD_FIFO_CNT2 19 +#define M_TGT_CMD_FIFO_CNT2 0x7U +#define V_TGT_CMD_FIFO_CNT2(x) ((x) << S_TGT_CMD_FIFO_CNT2) +#define G_TGT_CMD_FIFO_CNT2(x) (((x) >> S_TGT_CMD_FIFO_CNT2) & M_TGT_CMD_FIFO_CNT2) + +#define S_CLNT_NUM_FIFO_CNT2 16 +#define M_CLNT_NUM_FIFO_CNT2 0x7U +#define V_CLNT_NUM_FIFO_CNT2(x) ((x) << S_CLNT_NUM_FIFO_CNT2) +#define G_CLNT_NUM_FIFO_CNT2(x) (((x) >> S_CLNT_NUM_FIFO_CNT2) & M_CLNT_NUM_FIFO_CNT2) + +#define S_WR_CMD_TAG_FIFO_CNT_TGT2 8 +#define M_WR_CMD_TAG_FIFO_CNT_TGT2 0xffU +#define V_WR_CMD_TAG_FIFO_CNT_TGT2(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT2) +#define G_WR_CMD_TAG_FIFO_CNT_TGT2(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT2) & M_WR_CMD_TAG_FIFO_CNT_TGT2) + +#define S_WR_DATA_512B_FIFO_CNT_TGT2 0 +#define M_WR_DATA_512B_FIFO_CNT_TGT2 0xffU +#define V_WR_DATA_512B_FIFO_CNT_TGT2(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT2) +#define G_WR_DATA_512B_FIFO_CNT_TGT2(x) 
(((x) >> S_WR_DATA_512B_FIFO_CNT_TGT2) & M_WR_DATA_512B_FIFO_CNT_TGT2) + +#define A_MA_TARGET_3_ARBITER_INTERFACE_INTERNAL_REG0 0xa410 + +#define S_WR_DATA_FSM3 23 +#define V_WR_DATA_FSM3(x) ((x) << S_WR_DATA_FSM3) +#define F_WR_DATA_FSM3 V_WR_DATA_FSM3(1U) + +#define S_RD_DATA_FSM3 22 +#define V_RD_DATA_FSM3(x) ((x) << S_RD_DATA_FSM3) +#define F_RD_DATA_FSM3 V_RD_DATA_FSM3(1U) + +#define S_TGT_CMD_FIFO_CNT3 19 +#define M_TGT_CMD_FIFO_CNT3 0x7U +#define V_TGT_CMD_FIFO_CNT3(x) ((x) << S_TGT_CMD_FIFO_CNT3) +#define G_TGT_CMD_FIFO_CNT3(x) (((x) >> S_TGT_CMD_FIFO_CNT3) & M_TGT_CMD_FIFO_CNT3) + +#define S_CLNT_NUM_FIFO_CNT3 16 +#define M_CLNT_NUM_FIFO_CNT3 0x7U +#define V_CLNT_NUM_FIFO_CNT3(x) ((x) << S_CLNT_NUM_FIFO_CNT3) +#define G_CLNT_NUM_FIFO_CNT3(x) (((x) >> S_CLNT_NUM_FIFO_CNT3) & M_CLNT_NUM_FIFO_CNT3) + +#define S_WR_CMD_TAG_FIFO_CNT_TGT3 8 +#define M_WR_CMD_TAG_FIFO_CNT_TGT3 0xffU +#define V_WR_CMD_TAG_FIFO_CNT_TGT3(x) ((x) << S_WR_CMD_TAG_FIFO_CNT_TGT3) +#define G_WR_CMD_TAG_FIFO_CNT_TGT3(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT_TGT3) & M_WR_CMD_TAG_FIFO_CNT_TGT3) + +#define S_WR_DATA_512B_FIFO_CNT_TGT 0 +#define M_WR_DATA_512B_FIFO_CNT_TGT 0xffU +#define V_WR_DATA_512B_FIFO_CNT_TGT(x) ((x) << S_WR_DATA_512B_FIFO_CNT_TGT) +#define G_WR_DATA_512B_FIFO_CNT_TGT(x) (((x) >> S_WR_DATA_512B_FIFO_CNT_TGT) & M_WR_DATA_512B_FIFO_CNT_TGT) + +#define A_MA_SGE_THREAD_0_CLNT_EXP_RD_CYC_CNT_LO 0xa412 +#define A_MA_SGE_THREAD_1_CLNT_EXP_RD_CYC_CNT_LO 0xa413 +#define A_MA_ULP_TX_CLNT_EXP_RD_CYC_CNT_LO 0xa414 +#define A_MA_ULP_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa415 +#define A_MA_ULP_TX_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa416 +#define A_MA_TP_THREAD_0_CLNT_EXP_RD_CYC_CNT_LO 0xa417 +#define A_MA_TP_THREAD_1_CLNT_EXP_RD_CYC_CNT_LO 0xa418 +#define A_MA_LE_CLNT_EXP_RD_CYC_CNT_LO 0xa419 +#define A_MA_CIM_CLNT_EXP_RD_CYC_CNT_LO 0xa41a +#define A_MA_PCIE_CLNT_EXP_RD_CYC_CNT_LO 0xa41b +#define A_MA_PM_TX_CLNT_EXP_RD_CYC_CNT_LO 0xa41c +#define A_MA_PM_RX_CLNT_EXP_RD_CYC_CNT_LO 0xa41d +#define 
A_MA_HMA_CLNT_EXP_RD_CYC_CNT_LO 0xa41e +#define A_T6_MA_EDRAM0_WRDATA_CNT1 0xa800 +#define A_T6_MA_EDRAM0_WRDATA_CNT0 0xa801 +#define A_T6_MA_EDRAM1_WRDATA_CNT1 0xa802 +#define A_T6_MA_EDRAM1_WRDATA_CNT0 0xa803 +#define A_T6_MA_EXT_MEMORY0_WRDATA_CNT1 0xa804 +#define A_T6_MA_EXT_MEMORY0_WRDATA_CNT0 0xa805 +#define A_T6_MA_HOST_MEMORY_WRDATA_CNT1 0xa806 +#define A_T6_MA_HOST_MEMORY_WRDATA_CNT0 0xa807 +#define A_T6_MA_EXT_MEMORY1_WRDATA_CNT1 0xa808 +#define A_T6_MA_EXT_MEMORY1_WRDATA_CNT0 0xa809 +#define A_T6_MA_EDRAM0_RDDATA_CNT1 0xa80a +#define A_T6_MA_EDRAM0_RDDATA_CNT0 0xa80b +#define A_T6_MA_EDRAM1_RDDATA_CNT1 0xa80c +#define A_T6_MA_EDRAM1_RDDATA_CNT0 0xa80d +#define A_T6_MA_EXT_MEMORY0_RDDATA_CNT1 0xa80e +#define A_T6_MA_EXT_MEMORY0_RDDATA_CNT0 0xa80f +#define A_T6_MA_HOST_MEMORY_RDDATA_CNT1 0xa810 +#define A_T6_MA_HOST_MEMORY_RDDATA_CNT0 0xa811 +#define A_T6_MA_EXT_MEMORY1_RDDATA_CNT1 0xa812 +#define A_T6_MA_EXT_MEMORY1_RDDATA_CNT0 0xa813 +#define A_MA_SGE_THREAD_0_CLNT_ACT_WR_CYC_CNT_HI 0xac00 +#define A_MA_SGE_THREAD_0_CLNT_ACT_WR_CYC_CNT_LO 0xac01 +#define A_MA_SGE_THREAD_1_CLNT_ACT_WR_CYC_CNT_HI 0xac02 +#define A_MA_SGE_THREAD_1_CLNT_ACT_WR_CYC_CNT_LO 0xac03 +#define A_MA_ULP_TX_CLNT_ACT_WR_CYC_CNT_HI 0xac04 +#define A_MA_ULP_TX_CLNT_ACT_WR_CYC_CNT_LO 0xac05 +#define A_MA_ULP_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac06 +#define A_MA_ULP_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac07 +#define A_MA_ULP_TX_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac08 +#define A_MA_ULP_TX_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac09 +#define A_MA_TP_THREAD_0_CLNT_ACT_WR_CYC_CNT_HI 0xac0a +#define A_MA_TP_THREAD_0_CLNT_ACT_WR_CYC_CNT_LO 0xac0b +#define A_MA_TP_THREAD_1_CLNT_ACT_WR_CYC_CNT_HI 0xac0c +#define A_MA_TP_THREAD_1_CLNT_ACT_WR_CYC_CNT_LO 0xac0d +#define A_MA_LE_CLNT_ACT_WR_CYC_CNT_HI 0xac0e +#define A_MA_LE_CLNT_ACT_WR_CYC_CNT_LO 0xac0f +#define A_MA_CIM_CLNT_ACT_WR_CYC_CNT_HI 0xac10 +#define A_MA_CIM_CLNT_ACT_WR_CYC_CNT_LO 0xac11 +#define A_MA_PCIE_CLNT_ACT_WR_CYC_CNT_HI 0xac12 +#define 
A_MA_PCIE_CLNT_ACT_WR_CYC_CNT_LO 0xac13 +#define A_MA_PM_TX_CLNT_ACT_WR_CYC_CNT_HI 0xac14 +#define A_MA_PM_TX_CLNT_ACT_WR_CYC_CNT_LO 0xac15 +#define A_MA_PM_RX_CLNT_ACT_WR_CYC_CNT_HI 0xac16 +#define A_MA_PM_RX_CLNT_ACT_WR_CYC_CNT_LO 0xac17 +#define A_MA_HMA_CLNT_ACT_WR_CYC_CNT_HI 0xac18 +#define A_MA_HMA_CLNT_ACT_WR_CYC_CNT_LO 0xac19 +#define A_MA_SGE_THREAD_0_CLNT_WR_REQ_CNT 0xb000 +#define A_MA_SGE_THREAD_1_CLNT_WR_REQ_CNT 0xb001 +#define A_MA_ULP_TX_CLNT_WR_REQ_CNT 0xb002 +#define A_MA_ULP_RX_CLNT_WR_REQ_CNT 0xb003 +#define A_MA_ULP_TX_RX_CLNT_WR_REQ_CNT 0xb004 +#define A_MA_TP_THREAD_0_CLNT_WR_REQ_CNT 0xb005 +#define A_MA_TP_THREAD_1_CLNT_WR_REQ_CNT 0xb006 +#define A_MA_LE_CLNT_WR_REQ_CNT 0xb007 +#define A_MA_CIM_CLNT_WR_REQ_CNT 0xb008 +#define A_MA_PCIE_CLNT_WR_REQ_CNT 0xb009 +#define A_MA_PM_TX_CLNT_WR_REQ_CNT 0xb00a +#define A_MA_PM_RX_CLNT_WR_REQ_CNT 0xb00b +#define A_MA_HMA_CLNT_WR_REQ_CNT 0xb00c +#define A_MA_SGE_THREAD_0_CLNT_RD_REQ_CNT 0xb00d +#define A_MA_SGE_THREAD_1_CLNT_RD_REQ_CNT 0xb00e +#define A_MA_ULP_TX_CLNT_RD_REQ_CNT 0xb00f +#define A_MA_ULP_RX_CLNT_RD_REQ_CNT 0xb010 +#define A_MA_ULP_TX_RX_CLNT_RD_REQ_CNT 0xb011 +#define A_MA_TP_THREAD_0_CLNT_RD_REQ_CNT 0xb012 +#define A_MA_TP_THREAD_1_CLNT_RD_REQ_CNT 0xb013 +#define A_MA_LE_CLNT_RD_REQ_CNT 0xb014 +#define A_MA_CIM_CLNT_RD_REQ_CNT 0xb015 +#define A_MA_PCIE_CLNT_RD_REQ_CNT 0xb016 +#define A_MA_PM_TX_CLNT_RD_REQ_CNT 0xb017 +#define A_MA_PM_RX_CLNT_RD_REQ_CNT 0xb018 +#define A_MA_HMA_CLNT_RD_REQ_CNT 0xb019 +#define A_MA_SGE_THREAD_0_CLNT_EXP_RD_CYC_CNT_HI 0xb400 +#define A_MA_SGE_THREAD_1_CLNT_EXP_RD_CYC_CNT_HI 0xb401 +#define A_MA_ULP_TX_CLNT_EXP_RD_CYC_CNT_HI 0xb402 +#define A_MA_ULP_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb403 +#define A_MA_ULP_TX_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb404 +#define A_MA_TP_THREAD_0_CLNT_EXP_RD_CYC_CNT_HI 0xb405 +#define A_MA_TP_THREAD_1_CLNT_EXP_RD_CYC_CNT_HI 0xb406 +#define A_MA_LE_CLNT_EXP_RD_CYC_CNT_HI 0xb407 +#define A_MA_CIM_CLNT_EXP_RD_CYC_CNT_HI 0xb408 +#define 
A_MA_PCIE_CLNT_EXP_RD_CYC_CNT_HI 0xb409 +#define A_MA_PM_TX_CLNT_EXP_RD_CYC_CNT_HI 0xb40a +#define A_MA_PM_RX_CLNT_EXP_RD_CYC_CNT_HI 0xb40b +#define A_MA_HMA_CLNT_EXP_RD_CYC_CNT_HI 0xb40c +#define A_MA_SGE_THREAD_0_CLNT_EXP_WR_CYC_CNT_HI 0xb40d +#define A_MA_SGE_THREAD_1_CLNT_EXP_WR_CYC_CNT_HI 0xb40e +#define A_MA_ULP_TX_CLNT_EXP_WR_CYC_CNT_HI 0xb40f +#define A_MA_ULP_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb410 +#define A_MA_ULP_TX_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb411 +#define A_MA_TP_THREAD_0_CLNT_EXP_WR_CYC_CNT_HI 0xb412 +#define A_MA_TP_THREAD_1_CLNT_EXP_WR_CYC_CNT_HI 0xb413 +#define A_MA_LE_CLNT_EXP_WR_CYC_CNT_HI 0xb414 +#define A_MA_CIM_CLNT_EXP_WR_CYC_CNT_HI 0xb415 +#define A_MA_PCIE_CLNT_EXP_WR_CYC_CNT_HI 0xb416 +#define A_MA_PM_TX_CLNT_EXP_WR_CYC_CNT_HI 0xb417 +#define A_MA_PM_RX_CLNT_EXP_WR_CYC_CNT_HI 0xb418 +#define A_MA_HMA_CLNT_EXP_WR_CYC_CNT_HI 0xb419 +#define A_MA_SGE_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG1 0xe400 + +#define S_WR_DATA_EXT_FIFO_CNT0 30 +#define M_WR_DATA_EXT_FIFO_CNT0 0x3U +#define V_WR_DATA_EXT_FIFO_CNT0(x) ((x) << S_WR_DATA_EXT_FIFO_CNT0) +#define G_WR_DATA_EXT_FIFO_CNT0(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT0) & M_WR_DATA_EXT_FIFO_CNT0) + +#define S_WR_CMD_TAG_FIFO_CNT0 26 +#define M_WR_CMD_TAG_FIFO_CNT0 0xfU +#define V_WR_CMD_TAG_FIFO_CNT0(x) ((x) << S_WR_CMD_TAG_FIFO_CNT0) +#define G_WR_CMD_TAG_FIFO_CNT0(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT0) & M_WR_CMD_TAG_FIFO_CNT0) + +#define S_WR_DATA_512B_FIFO_CNT0 18 +#define M_WR_DATA_512B_FIFO_CNT0 0xffU +#define V_WR_DATA_512B_FIFO_CNT0(x) ((x) << S_WR_DATA_512B_FIFO_CNT0) +#define G_WR_DATA_512B_FIFO_CNT0(x) (((x) >> S_WR_DATA_512B_FIFO_CNT0) & M_WR_DATA_512B_FIFO_CNT0) + +#define S_RD_DATA_ALIGN_FSM0 17 +#define V_RD_DATA_ALIGN_FSM0(x) ((x) << S_RD_DATA_ALIGN_FSM0) +#define F_RD_DATA_ALIGN_FSM0 V_RD_DATA_ALIGN_FSM0(1U) + +#define S_RD_DATA_FETCH_FSM0 16 +#define V_RD_DATA_FETCH_FSM0(x) ((x) << S_RD_DATA_FETCH_FSM0) +#define F_RD_DATA_FETCH_FSM0 V_RD_DATA_FETCH_FSM0(1U) + +#define S_COHERENCY_TX_FSM0 15 
+#define V_COHERENCY_TX_FSM0(x) ((x) << S_COHERENCY_TX_FSM0) +#define F_COHERENCY_TX_FSM0 V_COHERENCY_TX_FSM0(1U) + +#define S_COHERENCY_RX_FSM0 14 +#define V_COHERENCY_RX_FSM0(x) ((x) << S_COHERENCY_RX_FSM0) +#define F_COHERENCY_RX_FSM0 V_COHERENCY_RX_FSM0(1U) + +#define S_ARB_REQ_FSM0 13 +#define V_ARB_REQ_FSM0(x) ((x) << S_ARB_REQ_FSM0) +#define F_ARB_REQ_FSM0 V_ARB_REQ_FSM0(1U) + +#define S_CMD_SPLIT_FSM0 10 +#define M_CMD_SPLIT_FSM0 0x7U +#define V_CMD_SPLIT_FSM0(x) ((x) << S_CMD_SPLIT_FSM0) +#define G_CMD_SPLIT_FSM0(x) (((x) >> S_CMD_SPLIT_FSM0) & M_CMD_SPLIT_FSM0) + +#define A_MA_SGE_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG1 0xe420 + +#define S_WR_DATA_EXT_FIFO_CNT1 30 +#define M_WR_DATA_EXT_FIFO_CNT1 0x3U +#define V_WR_DATA_EXT_FIFO_CNT1(x) ((x) << S_WR_DATA_EXT_FIFO_CNT1) +#define G_WR_DATA_EXT_FIFO_CNT1(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT1) & M_WR_DATA_EXT_FIFO_CNT1) + +#define S_WR_CMD_TAG_FIFO_CNT1 26 +#define M_WR_CMD_TAG_FIFO_CNT1 0xfU +#define V_WR_CMD_TAG_FIFO_CNT1(x) ((x) << S_WR_CMD_TAG_FIFO_CNT1) +#define G_WR_CMD_TAG_FIFO_CNT1(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT1) & M_WR_CMD_TAG_FIFO_CNT1) + +#define S_WR_DATA_512B_FIFO_CNT1 18 +#define M_WR_DATA_512B_FIFO_CNT1 0xffU +#define V_WR_DATA_512B_FIFO_CNT1(x) ((x) << S_WR_DATA_512B_FIFO_CNT1) +#define G_WR_DATA_512B_FIFO_CNT1(x) (((x) >> S_WR_DATA_512B_FIFO_CNT1) & M_WR_DATA_512B_FIFO_CNT1) + +#define S_RD_DATA_ALIGN_FSM1 17 +#define V_RD_DATA_ALIGN_FSM1(x) ((x) << S_RD_DATA_ALIGN_FSM1) +#define F_RD_DATA_ALIGN_FSM1 V_RD_DATA_ALIGN_FSM1(1U) + +#define S_RD_DATA_FETCH_FSM1 16 +#define V_RD_DATA_FETCH_FSM1(x) ((x) << S_RD_DATA_FETCH_FSM1) +#define F_RD_DATA_FETCH_FSM1 V_RD_DATA_FETCH_FSM1(1U) + +#define S_COHERENCY_TX_FSM1 15 +#define V_COHERENCY_TX_FSM1(x) ((x) << S_COHERENCY_TX_FSM1) +#define F_COHERENCY_TX_FSM1 V_COHERENCY_TX_FSM1(1U) + +#define S_COHERENCY_RX_FSM1 14 +#define V_COHERENCY_RX_FSM1(x) ((x) << S_COHERENCY_RX_FSM1) +#define F_COHERENCY_RX_FSM1 V_COHERENCY_RX_FSM1(1U) + +#define S_ARB_REQ_FSM1 
13 +#define V_ARB_REQ_FSM1(x) ((x) << S_ARB_REQ_FSM1) +#define F_ARB_REQ_FSM1 V_ARB_REQ_FSM1(1U) + +#define S_CMD_SPLIT_FSM1 10 +#define M_CMD_SPLIT_FSM1 0x7U +#define V_CMD_SPLIT_FSM1(x) ((x) << S_CMD_SPLIT_FSM1) +#define G_CMD_SPLIT_FSM1(x) (((x) >> S_CMD_SPLIT_FSM1) & M_CMD_SPLIT_FSM1) + +#define A_MA_ULP_TX_CLIENT_INTERFACE_INTERNAL_REG1 0xe440 + +#define S_WR_DATA_EXT_FIFO_CNT2 30 +#define M_WR_DATA_EXT_FIFO_CNT2 0x3U +#define V_WR_DATA_EXT_FIFO_CNT2(x) ((x) << S_WR_DATA_EXT_FIFO_CNT2) +#define G_WR_DATA_EXT_FIFO_CNT2(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT2) & M_WR_DATA_EXT_FIFO_CNT2) + +#define S_WR_CMD_TAG_FIFO_CNT2 26 +#define M_WR_CMD_TAG_FIFO_CNT2 0xfU +#define V_WR_CMD_TAG_FIFO_CNT2(x) ((x) << S_WR_CMD_TAG_FIFO_CNT2) +#define G_WR_CMD_TAG_FIFO_CNT2(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT2) & M_WR_CMD_TAG_FIFO_CNT2) + +#define S_WR_DATA_512B_FIFO_CNT2 18 +#define M_WR_DATA_512B_FIFO_CNT2 0xffU +#define V_WR_DATA_512B_FIFO_CNT2(x) ((x) << S_WR_DATA_512B_FIFO_CNT2) +#define G_WR_DATA_512B_FIFO_CNT2(x) (((x) >> S_WR_DATA_512B_FIFO_CNT2) & M_WR_DATA_512B_FIFO_CNT2) + +#define S_RD_DATA_ALIGN_FSM2 17 +#define V_RD_DATA_ALIGN_FSM2(x) ((x) << S_RD_DATA_ALIGN_FSM2) +#define F_RD_DATA_ALIGN_FSM2 V_RD_DATA_ALIGN_FSM2(1U) + +#define S_RD_DATA_FETCH_FSM2 16 +#define V_RD_DATA_FETCH_FSM2(x) ((x) << S_RD_DATA_FETCH_FSM2) +#define F_RD_DATA_FETCH_FSM2 V_RD_DATA_FETCH_FSM2(1U) + +#define S_COHERENCY_TX_FSM2 15 +#define V_COHERENCY_TX_FSM2(x) ((x) << S_COHERENCY_TX_FSM2) +#define F_COHERENCY_TX_FSM2 V_COHERENCY_TX_FSM2(1U) + +#define S_COHERENCY_RX_FSM2 14 +#define V_COHERENCY_RX_FSM2(x) ((x) << S_COHERENCY_RX_FSM2) +#define F_COHERENCY_RX_FSM2 V_COHERENCY_RX_FSM2(1U) + +#define S_ARB_REQ_FSM2 13 +#define V_ARB_REQ_FSM2(x) ((x) << S_ARB_REQ_FSM2) +#define F_ARB_REQ_FSM2 V_ARB_REQ_FSM2(1U) + +#define S_CMD_SPLIT_FSM2 10 +#define M_CMD_SPLIT_FSM2 0x7U +#define V_CMD_SPLIT_FSM2(x) ((x) << S_CMD_SPLIT_FSM2) +#define G_CMD_SPLIT_FSM2(x) (((x) >> S_CMD_SPLIT_FSM2) & M_CMD_SPLIT_FSM2) + 
+#define A_MA_ULP_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe460 + +#define S_WR_DATA_EXT_FIFO_CNT3 30 +#define M_WR_DATA_EXT_FIFO_CNT3 0x3U +#define V_WR_DATA_EXT_FIFO_CNT3(x) ((x) << S_WR_DATA_EXT_FIFO_CNT3) +#define G_WR_DATA_EXT_FIFO_CNT3(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT3) & M_WR_DATA_EXT_FIFO_CNT3) + +#define S_WR_CMD_TAG_FIFO_CNT3 26 +#define M_WR_CMD_TAG_FIFO_CNT3 0xfU +#define V_WR_CMD_TAG_FIFO_CNT3(x) ((x) << S_WR_CMD_TAG_FIFO_CNT3) +#define G_WR_CMD_TAG_FIFO_CNT3(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT3) & M_WR_CMD_TAG_FIFO_CNT3) + +#define S_WR_DATA_512B_FIFO_CNT3 18 +#define M_WR_DATA_512B_FIFO_CNT3 0xffU +#define V_WR_DATA_512B_FIFO_CNT3(x) ((x) << S_WR_DATA_512B_FIFO_CNT3) +#define G_WR_DATA_512B_FIFO_CNT3(x) (((x) >> S_WR_DATA_512B_FIFO_CNT3) & M_WR_DATA_512B_FIFO_CNT3) + +#define S_RD_DATA_ALIGN_FSM3 17 +#define V_RD_DATA_ALIGN_FSM3(x) ((x) << S_RD_DATA_ALIGN_FSM3) +#define F_RD_DATA_ALIGN_FSM3 V_RD_DATA_ALIGN_FSM3(1U) + +#define S_RD_DATA_FETCH_FSM3 16 +#define V_RD_DATA_FETCH_FSM3(x) ((x) << S_RD_DATA_FETCH_FSM3) +#define F_RD_DATA_FETCH_FSM3 V_RD_DATA_FETCH_FSM3(1U) + +#define S_COHERENCY_TX_FSM3 15 +#define V_COHERENCY_TX_FSM3(x) ((x) << S_COHERENCY_TX_FSM3) +#define F_COHERENCY_TX_FSM3 V_COHERENCY_TX_FSM3(1U) + +#define S_COHERENCY_RX_FSM3 14 +#define V_COHERENCY_RX_FSM3(x) ((x) << S_COHERENCY_RX_FSM3) +#define F_COHERENCY_RX_FSM3 V_COHERENCY_RX_FSM3(1U) + +#define S_ARB_REQ_FSM3 13 +#define V_ARB_REQ_FSM3(x) ((x) << S_ARB_REQ_FSM3) +#define F_ARB_REQ_FSM3 V_ARB_REQ_FSM3(1U) + +#define S_CMD_SPLIT_FSM3 10 +#define M_CMD_SPLIT_FSM3 0x7U +#define V_CMD_SPLIT_FSM3(x) ((x) << S_CMD_SPLIT_FSM3) +#define G_CMD_SPLIT_FSM3(x) (((x) >> S_CMD_SPLIT_FSM3) & M_CMD_SPLIT_FSM3) + +#define A_MA_ULP_TX_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe480 + +#define S_WR_DATA_EXT_FIFO_CNT4 30 +#define M_WR_DATA_EXT_FIFO_CNT4 0x3U +#define V_WR_DATA_EXT_FIFO_CNT4(x) ((x) << S_WR_DATA_EXT_FIFO_CNT4) +#define G_WR_DATA_EXT_FIFO_CNT4(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT4) & 
M_WR_DATA_EXT_FIFO_CNT4) + +#define S_WR_CMD_TAG_FIFO_CNT4 26 +#define M_WR_CMD_TAG_FIFO_CNT4 0xfU +#define V_WR_CMD_TAG_FIFO_CNT4(x) ((x) << S_WR_CMD_TAG_FIFO_CNT4) +#define G_WR_CMD_TAG_FIFO_CNT4(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT4) & M_WR_CMD_TAG_FIFO_CNT4) + +#define S_WR_DATA_512B_FIFO_CNT4 18 +#define M_WR_DATA_512B_FIFO_CNT4 0xffU +#define V_WR_DATA_512B_FIFO_CNT4(x) ((x) << S_WR_DATA_512B_FIFO_CNT4) +#define G_WR_DATA_512B_FIFO_CNT4(x) (((x) >> S_WR_DATA_512B_FIFO_CNT4) & M_WR_DATA_512B_FIFO_CNT4) + +#define S_RD_DATA_ALIGN_FSM4 17 +#define V_RD_DATA_ALIGN_FSM4(x) ((x) << S_RD_DATA_ALIGN_FSM4) +#define F_RD_DATA_ALIGN_FSM4 V_RD_DATA_ALIGN_FSM4(1U) + +#define S_RD_DATA_FETCH_FSM4 16 +#define V_RD_DATA_FETCH_FSM4(x) ((x) << S_RD_DATA_FETCH_FSM4) +#define F_RD_DATA_FETCH_FSM4 V_RD_DATA_FETCH_FSM4(1U) + +#define S_COHERENCY_TX_FSM4 15 +#define V_COHERENCY_TX_FSM4(x) ((x) << S_COHERENCY_TX_FSM4) +#define F_COHERENCY_TX_FSM4 V_COHERENCY_TX_FSM4(1U) + +#define S_COHERENCY_RX_FSM4 14 +#define V_COHERENCY_RX_FSM4(x) ((x) << S_COHERENCY_RX_FSM4) +#define F_COHERENCY_RX_FSM4 V_COHERENCY_RX_FSM4(1U) + +#define S_ARB_REQ_FSM4 13 +#define V_ARB_REQ_FSM4(x) ((x) << S_ARB_REQ_FSM4) +#define F_ARB_REQ_FSM4 V_ARB_REQ_FSM4(1U) + +#define S_CMD_SPLIT_FSM4 10 +#define M_CMD_SPLIT_FSM4 0x7U +#define V_CMD_SPLIT_FSM4(x) ((x) << S_CMD_SPLIT_FSM4) +#define G_CMD_SPLIT_FSM4(x) (((x) >> S_CMD_SPLIT_FSM4) & M_CMD_SPLIT_FSM4) + +#define A_MA_TP_THREAD_0_CLIENT_INTERFACE_INTERNAL_REG1 0xe4a0 + +#define S_WR_DATA_EXT_FIFO_CNT5 30 +#define M_WR_DATA_EXT_FIFO_CNT5 0x3U +#define V_WR_DATA_EXT_FIFO_CNT5(x) ((x) << S_WR_DATA_EXT_FIFO_CNT5) +#define G_WR_DATA_EXT_FIFO_CNT5(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT5) & M_WR_DATA_EXT_FIFO_CNT5) + +#define S_WR_CMD_TAG_FIFO_CNT5 26 +#define M_WR_CMD_TAG_FIFO_CNT5 0xfU +#define V_WR_CMD_TAG_FIFO_CNT5(x) ((x) << S_WR_CMD_TAG_FIFO_CNT5) +#define G_WR_CMD_TAG_FIFO_CNT5(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT5) & M_WR_CMD_TAG_FIFO_CNT5) + +#define 
S_WR_DATA_512B_FIFO_CNT5 18 +#define M_WR_DATA_512B_FIFO_CNT5 0xffU +#define V_WR_DATA_512B_FIFO_CNT5(x) ((x) << S_WR_DATA_512B_FIFO_CNT5) +#define G_WR_DATA_512B_FIFO_CNT5(x) (((x) >> S_WR_DATA_512B_FIFO_CNT5) & M_WR_DATA_512B_FIFO_CNT5) + +#define S_RD_DATA_ALIGN_FSM5 17 +#define V_RD_DATA_ALIGN_FSM5(x) ((x) << S_RD_DATA_ALIGN_FSM5) +#define F_RD_DATA_ALIGN_FSM5 V_RD_DATA_ALIGN_FSM5(1U) + +#define S_RD_DATA_FETCH_FSM5 16 +#define V_RD_DATA_FETCH_FSM5(x) ((x) << S_RD_DATA_FETCH_FSM5) +#define F_RD_DATA_FETCH_FSM5 V_RD_DATA_FETCH_FSM5(1U) + +#define S_COHERENCY_TX_FSM5 15 +#define V_COHERENCY_TX_FSM5(x) ((x) << S_COHERENCY_TX_FSM5) +#define F_COHERENCY_TX_FSM5 V_COHERENCY_TX_FSM5(1U) + +#define S_COHERENCY_RX_FSM5 14 +#define V_COHERENCY_RX_FSM5(x) ((x) << S_COHERENCY_RX_FSM5) +#define F_COHERENCY_RX_FSM5 V_COHERENCY_RX_FSM5(1U) + +#define S_ARB_REQ_FSM5 13 +#define V_ARB_REQ_FSM5(x) ((x) << S_ARB_REQ_FSM5) +#define F_ARB_REQ_FSM5 V_ARB_REQ_FSM5(1U) + +#define S_CMD_SPLIT_FSM5 10 +#define M_CMD_SPLIT_FSM5 0x7U +#define V_CMD_SPLIT_FSM5(x) ((x) << S_CMD_SPLIT_FSM5) +#define G_CMD_SPLIT_FSM5(x) (((x) >> S_CMD_SPLIT_FSM5) & M_CMD_SPLIT_FSM5) + +#define A_MA_TP_THREAD_1_CLIENT_INTERFACE_INTERNAL_REG1 0xe4c0 + +#define S_WR_DATA_EXT_FIFO_CNT6 30 +#define M_WR_DATA_EXT_FIFO_CNT6 0x3U +#define V_WR_DATA_EXT_FIFO_CNT6(x) ((x) << S_WR_DATA_EXT_FIFO_CNT6) +#define G_WR_DATA_EXT_FIFO_CNT6(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT6) & M_WR_DATA_EXT_FIFO_CNT6) + +#define S_WR_CMD_TAG_FIFO_CNT6 26 +#define M_WR_CMD_TAG_FIFO_CNT6 0xfU +#define V_WR_CMD_TAG_FIFO_CNT6(x) ((x) << S_WR_CMD_TAG_FIFO_CNT6) +#define G_WR_CMD_TAG_FIFO_CNT6(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT6) & M_WR_CMD_TAG_FIFO_CNT6) + +#define S_WR_DATA_512B_FIFO_CNT6 18 +#define M_WR_DATA_512B_FIFO_CNT6 0xffU +#define V_WR_DATA_512B_FIFO_CNT6(x) ((x) << S_WR_DATA_512B_FIFO_CNT6) +#define G_WR_DATA_512B_FIFO_CNT6(x) (((x) >> S_WR_DATA_512B_FIFO_CNT6) & M_WR_DATA_512B_FIFO_CNT6) + +#define S_RD_DATA_ALIGN_FSM6 17 +#define 
V_RD_DATA_ALIGN_FSM6(x) ((x) << S_RD_DATA_ALIGN_FSM6) +#define F_RD_DATA_ALIGN_FSM6 V_RD_DATA_ALIGN_FSM6(1U) + +#define S_RD_DATA_FETCH_FSM6 16 +#define V_RD_DATA_FETCH_FSM6(x) ((x) << S_RD_DATA_FETCH_FSM6) +#define F_RD_DATA_FETCH_FSM6 V_RD_DATA_FETCH_FSM6(1U) + +#define S_COHERENCY_TX_FSM6 15 +#define V_COHERENCY_TX_FSM6(x) ((x) << S_COHERENCY_TX_FSM6) +#define F_COHERENCY_TX_FSM6 V_COHERENCY_TX_FSM6(1U) + +#define S_COHERENCY_RX_FSM6 14 +#define V_COHERENCY_RX_FSM6(x) ((x) << S_COHERENCY_RX_FSM6) +#define F_COHERENCY_RX_FSM6 V_COHERENCY_RX_FSM6(1U) + +#define S_ARB_REQ_FSM6 13 +#define V_ARB_REQ_FSM6(x) ((x) << S_ARB_REQ_FSM6) +#define F_ARB_REQ_FSM6 V_ARB_REQ_FSM6(1U) + +#define S_CMD_SPLIT_FSM6 10 +#define M_CMD_SPLIT_FSM6 0x7U +#define V_CMD_SPLIT_FSM6(x) ((x) << S_CMD_SPLIT_FSM6) +#define G_CMD_SPLIT_FSM6(x) (((x) >> S_CMD_SPLIT_FSM6) & M_CMD_SPLIT_FSM6) + +#define A_MA_LE_CLIENT_INTERFACE_INTERNAL_REG1 0xe4e0 + +#define S_WR_DATA_EXT_FIFO_CNT7 30 +#define M_WR_DATA_EXT_FIFO_CNT7 0x3U +#define V_WR_DATA_EXT_FIFO_CNT7(x) ((x) << S_WR_DATA_EXT_FIFO_CNT7) +#define G_WR_DATA_EXT_FIFO_CNT7(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT7) & M_WR_DATA_EXT_FIFO_CNT7) + +#define S_WR_CMD_TAG_FIFO_CNT7 26 +#define M_WR_CMD_TAG_FIFO_CNT7 0xfU +#define V_WR_CMD_TAG_FIFO_CNT7(x) ((x) << S_WR_CMD_TAG_FIFO_CNT7) +#define G_WR_CMD_TAG_FIFO_CNT7(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT7) & M_WR_CMD_TAG_FIFO_CNT7) + +#define S_WR_DATA_512B_FIFO_CNT7 18 +#define M_WR_DATA_512B_FIFO_CNT7 0xffU +#define V_WR_DATA_512B_FIFO_CNT7(x) ((x) << S_WR_DATA_512B_FIFO_CNT7) +#define G_WR_DATA_512B_FIFO_CNT7(x) (((x) >> S_WR_DATA_512B_FIFO_CNT7) & M_WR_DATA_512B_FIFO_CNT7) + +#define S_RD_DATA_ALIGN_FSM7 17 +#define V_RD_DATA_ALIGN_FSM7(x) ((x) << S_RD_DATA_ALIGN_FSM7) +#define F_RD_DATA_ALIGN_FSM7 V_RD_DATA_ALIGN_FSM7(1U) + +#define S_RD_DATA_FETCH_FSM7 16 +#define V_RD_DATA_FETCH_FSM7(x) ((x) << S_RD_DATA_FETCH_FSM7) +#define F_RD_DATA_FETCH_FSM7 V_RD_DATA_FETCH_FSM7(1U) + +#define S_COHERENCY_TX_FSM7 15 
+#define V_COHERENCY_TX_FSM7(x) ((x) << S_COHERENCY_TX_FSM7) +#define F_COHERENCY_TX_FSM7 V_COHERENCY_TX_FSM7(1U) + +#define S_COHERENCY_RX_FSM7 14 +#define V_COHERENCY_RX_FSM7(x) ((x) << S_COHERENCY_RX_FSM7) +#define F_COHERENCY_RX_FSM7 V_COHERENCY_RX_FSM7(1U) + +#define S_ARB_REQ_FSM7 13 +#define V_ARB_REQ_FSM7(x) ((x) << S_ARB_REQ_FSM7) +#define F_ARB_REQ_FSM7 V_ARB_REQ_FSM7(1U) + +#define S_CMD_SPLIT_FSM7 10 +#define M_CMD_SPLIT_FSM7 0x7U +#define V_CMD_SPLIT_FSM7(x) ((x) << S_CMD_SPLIT_FSM7) +#define G_CMD_SPLIT_FSM7(x) (((x) >> S_CMD_SPLIT_FSM7) & M_CMD_SPLIT_FSM7) + +#define A_MA_CIM_CLIENT_INTERFACE_INTERNAL_REG1 0xe500 + +#define S_WR_DATA_EXT_FIFO_CNT8 30 +#define M_WR_DATA_EXT_FIFO_CNT8 0x3U +#define V_WR_DATA_EXT_FIFO_CNT8(x) ((x) << S_WR_DATA_EXT_FIFO_CNT8) +#define G_WR_DATA_EXT_FIFO_CNT8(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT8) & M_WR_DATA_EXT_FIFO_CNT8) + +#define S_WR_CMD_TAG_FIFO_CNT8 26 +#define M_WR_CMD_TAG_FIFO_CNT8 0xfU +#define V_WR_CMD_TAG_FIFO_CNT8(x) ((x) << S_WR_CMD_TAG_FIFO_CNT8) +#define G_WR_CMD_TAG_FIFO_CNT8(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT8) & M_WR_CMD_TAG_FIFO_CNT8) + +#define S_WR_DATA_512B_FIFO_CNT8 18 +#define M_WR_DATA_512B_FIFO_CNT8 0xffU +#define V_WR_DATA_512B_FIFO_CNT8(x) ((x) << S_WR_DATA_512B_FIFO_CNT8) +#define G_WR_DATA_512B_FIFO_CNT8(x) (((x) >> S_WR_DATA_512B_FIFO_CNT8) & M_WR_DATA_512B_FIFO_CNT8) + +#define S_RD_DATA_ALIGN_FSM8 17 +#define V_RD_DATA_ALIGN_FSM8(x) ((x) << S_RD_DATA_ALIGN_FSM8) +#define F_RD_DATA_ALIGN_FSM8 V_RD_DATA_ALIGN_FSM8(1U) + +#define S_RD_DATA_FETCH_FSM8 16 +#define V_RD_DATA_FETCH_FSM8(x) ((x) << S_RD_DATA_FETCH_FSM8) +#define F_RD_DATA_FETCH_FSM8 V_RD_DATA_FETCH_FSM8(1U) + +#define S_COHERENCY_TX_FSM8 15 +#define V_COHERENCY_TX_FSM8(x) ((x) << S_COHERENCY_TX_FSM8) +#define F_COHERENCY_TX_FSM8 V_COHERENCY_TX_FSM8(1U) + +#define S_COHERENCY_RX_FSM8 14 +#define V_COHERENCY_RX_FSM8(x) ((x) << S_COHERENCY_RX_FSM8) +#define F_COHERENCY_RX_FSM8 V_COHERENCY_RX_FSM8(1U) + +#define S_ARB_REQ_FSM8 13 
+#define V_ARB_REQ_FSM8(x) ((x) << S_ARB_REQ_FSM8) +#define F_ARB_REQ_FSM8 V_ARB_REQ_FSM8(1U) + +#define S_CMD_SPLIT_FSM8 10 +#define M_CMD_SPLIT_FSM8 0x7U +#define V_CMD_SPLIT_FSM8(x) ((x) << S_CMD_SPLIT_FSM8) +#define G_CMD_SPLIT_FSM8(x) (((x) >> S_CMD_SPLIT_FSM8) & M_CMD_SPLIT_FSM8) + +#define A_MA_PCIE_CLIENT_INTERFACE_INTERNAL_REG1 0xe520 + +#define S_WR_DATA_EXT_FIFO_CNT9 30 +#define M_WR_DATA_EXT_FIFO_CNT9 0x3U +#define V_WR_DATA_EXT_FIFO_CNT9(x) ((x) << S_WR_DATA_EXT_FIFO_CNT9) +#define G_WR_DATA_EXT_FIFO_CNT9(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT9) & M_WR_DATA_EXT_FIFO_CNT9) + +#define S_WR_CMD_TAG_FIFO_CNT9 26 +#define M_WR_CMD_TAG_FIFO_CNT9 0xfU +#define V_WR_CMD_TAG_FIFO_CNT9(x) ((x) << S_WR_CMD_TAG_FIFO_CNT9) +#define G_WR_CMD_TAG_FIFO_CNT9(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT9) & M_WR_CMD_TAG_FIFO_CNT9) + +#define S_WR_DATA_512B_FIFO_CNT9 18 +#define M_WR_DATA_512B_FIFO_CNT9 0xffU +#define V_WR_DATA_512B_FIFO_CNT9(x) ((x) << S_WR_DATA_512B_FIFO_CNT9) +#define G_WR_DATA_512B_FIFO_CNT9(x) (((x) >> S_WR_DATA_512B_FIFO_CNT9) & M_WR_DATA_512B_FIFO_CNT9) + +#define S_RD_DATA_ALIGN_FSM9 17 +#define V_RD_DATA_ALIGN_FSM9(x) ((x) << S_RD_DATA_ALIGN_FSM9) +#define F_RD_DATA_ALIGN_FSM9 V_RD_DATA_ALIGN_FSM9(1U) + +#define S_RD_DATA_FETCH_FSM9 16 +#define V_RD_DATA_FETCH_FSM9(x) ((x) << S_RD_DATA_FETCH_FSM9) +#define F_RD_DATA_FETCH_FSM9 V_RD_DATA_FETCH_FSM9(1U) + +#define S_COHERENCY_TX_FSM9 15 +#define V_COHERENCY_TX_FSM9(x) ((x) << S_COHERENCY_TX_FSM9) +#define F_COHERENCY_TX_FSM9 V_COHERENCY_TX_FSM9(1U) + +#define S_COHERENCY_RX_FSM9 14 +#define V_COHERENCY_RX_FSM9(x) ((x) << S_COHERENCY_RX_FSM9) +#define F_COHERENCY_RX_FSM9 V_COHERENCY_RX_FSM9(1U) + +#define S_ARB_REQ_FSM9 13 +#define V_ARB_REQ_FSM9(x) ((x) << S_ARB_REQ_FSM9) +#define F_ARB_REQ_FSM9 V_ARB_REQ_FSM9(1U) + +#define S_CMD_SPLIT_FSM9 10 +#define M_CMD_SPLIT_FSM9 0x7U +#define V_CMD_SPLIT_FSM9(x) ((x) << S_CMD_SPLIT_FSM9) +#define G_CMD_SPLIT_FSM9(x) (((x) >> S_CMD_SPLIT_FSM9) & M_CMD_SPLIT_FSM9) + 
+#define A_MA_PM_TX_CLIENT_INTERFACE_INTERNAL_REG1 0xe540 + +#define S_WR_DATA_EXT_FIFO_CNT10 30 +#define M_WR_DATA_EXT_FIFO_CNT10 0x3U +#define V_WR_DATA_EXT_FIFO_CNT10(x) ((x) << S_WR_DATA_EXT_FIFO_CNT10) +#define G_WR_DATA_EXT_FIFO_CNT10(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT10) & M_WR_DATA_EXT_FIFO_CNT10) + +#define S_WR_CMD_TAG_FIFO_CNT10 26 +#define M_WR_CMD_TAG_FIFO_CNT10 0xfU +#define V_WR_CMD_TAG_FIFO_CNT10(x) ((x) << S_WR_CMD_TAG_FIFO_CNT10) +#define G_WR_CMD_TAG_FIFO_CNT10(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT10) & M_WR_CMD_TAG_FIFO_CNT10) + +#define S_WR_DATA_512B_FIFO_CNT10 18 +#define M_WR_DATA_512B_FIFO_CNT10 0xffU +#define V_WR_DATA_512B_FIFO_CNT10(x) ((x) << S_WR_DATA_512B_FIFO_CNT10) +#define G_WR_DATA_512B_FIFO_CNT10(x) (((x) >> S_WR_DATA_512B_FIFO_CNT10) & M_WR_DATA_512B_FIFO_CNT10) + +#define S_RD_DATA_ALIGN_FSM10 17 +#define V_RD_DATA_ALIGN_FSM10(x) ((x) << S_RD_DATA_ALIGN_FSM10) +#define F_RD_DATA_ALIGN_FSM10 V_RD_DATA_ALIGN_FSM10(1U) + +#define S_RD_DATA_FETCH_FSM10 16 +#define V_RD_DATA_FETCH_FSM10(x) ((x) << S_RD_DATA_FETCH_FSM10) +#define F_RD_DATA_FETCH_FSM10 V_RD_DATA_FETCH_FSM10(1U) + +#define S_COHERENCY_TX_FSM10 15 +#define V_COHERENCY_TX_FSM10(x) ((x) << S_COHERENCY_TX_FSM10) +#define F_COHERENCY_TX_FSM10 V_COHERENCY_TX_FSM10(1U) + +#define S_COHERENCY_RX_FSM10 14 +#define V_COHERENCY_RX_FSM10(x) ((x) << S_COHERENCY_RX_FSM10) +#define F_COHERENCY_RX_FSM10 V_COHERENCY_RX_FSM10(1U) + +#define S_ARB_REQ_FSM10 13 +#define V_ARB_REQ_FSM10(x) ((x) << S_ARB_REQ_FSM10) +#define F_ARB_REQ_FSM10 V_ARB_REQ_FSM10(1U) + +#define S_CMD_SPLIT_FSM10 10 +#define M_CMD_SPLIT_FSM10 0x7U +#define V_CMD_SPLIT_FSM10(x) ((x) << S_CMD_SPLIT_FSM10) +#define G_CMD_SPLIT_FSM10(x) (((x) >> S_CMD_SPLIT_FSM10) & M_CMD_SPLIT_FSM10) + +#define A_MA_PM_RX_CLIENT_INTERFACE_INTERNAL_REG1 0xe560 + +#define S_WR_DATA_EXT_FIFO_CNT11 30 +#define M_WR_DATA_EXT_FIFO_CNT11 0x3U +#define V_WR_DATA_EXT_FIFO_CNT11(x) ((x) << S_WR_DATA_EXT_FIFO_CNT11) +#define 
G_WR_DATA_EXT_FIFO_CNT11(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT11) & M_WR_DATA_EXT_FIFO_CNT11) + +#define S_WR_CMD_TAG_FIFO_CNT11 26 +#define M_WR_CMD_TAG_FIFO_CNT11 0xfU +#define V_WR_CMD_TAG_FIFO_CNT11(x) ((x) << S_WR_CMD_TAG_FIFO_CNT11) +#define G_WR_CMD_TAG_FIFO_CNT11(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT11) & M_WR_CMD_TAG_FIFO_CNT11) + +#define S_WR_DATA_512B_FIFO_CNT11 18 +#define M_WR_DATA_512B_FIFO_CNT11 0xffU +#define V_WR_DATA_512B_FIFO_CNT11(x) ((x) << S_WR_DATA_512B_FIFO_CNT11) +#define G_WR_DATA_512B_FIFO_CNT11(x) (((x) >> S_WR_DATA_512B_FIFO_CNT11) & M_WR_DATA_512B_FIFO_CNT11) + +#define S_RD_DATA_ALIGN_FSM11 17 +#define V_RD_DATA_ALIGN_FSM11(x) ((x) << S_RD_DATA_ALIGN_FSM11) +#define F_RD_DATA_ALIGN_FSM11 V_RD_DATA_ALIGN_FSM11(1U) + +#define S_RD_DATA_FETCH_FSM11 16 +#define V_RD_DATA_FETCH_FSM11(x) ((x) << S_RD_DATA_FETCH_FSM11) +#define F_RD_DATA_FETCH_FSM11 V_RD_DATA_FETCH_FSM11(1U) + +#define S_COHERENCY_TX_FSM11 15 +#define V_COHERENCY_TX_FSM11(x) ((x) << S_COHERENCY_TX_FSM11) +#define F_COHERENCY_TX_FSM11 V_COHERENCY_TX_FSM11(1U) + +#define S_COHERENCY_RX_FSM11 14 +#define V_COHERENCY_RX_FSM11(x) ((x) << S_COHERENCY_RX_FSM11) +#define F_COHERENCY_RX_FSM11 V_COHERENCY_RX_FSM11(1U) + +#define S_ARB_REQ_FSM11 13 +#define V_ARB_REQ_FSM11(x) ((x) << S_ARB_REQ_FSM11) +#define F_ARB_REQ_FSM11 V_ARB_REQ_FSM11(1U) + +#define S_CMD_SPLIT_FSM11 10 +#define M_CMD_SPLIT_FSM11 0x7U +#define V_CMD_SPLIT_FSM11(x) ((x) << S_CMD_SPLIT_FSM11) +#define G_CMD_SPLIT_FSM11(x) (((x) >> S_CMD_SPLIT_FSM11) & M_CMD_SPLIT_FSM11) + +#define A_MA_HMA_CLIENT_INTERFACE_INTERNAL_REG1 0xe580 + +#define S_WR_DATA_EXT_FIFO_CNT12 30 +#define M_WR_DATA_EXT_FIFO_CNT12 0x3U +#define V_WR_DATA_EXT_FIFO_CNT12(x) ((x) << S_WR_DATA_EXT_FIFO_CNT12) +#define G_WR_DATA_EXT_FIFO_CNT12(x) (((x) >> S_WR_DATA_EXT_FIFO_CNT12) & M_WR_DATA_EXT_FIFO_CNT12) + +#define S_WR_CMD_TAG_FIFO_CNT12 26 +#define M_WR_CMD_TAG_FIFO_CNT12 0xfU +#define V_WR_CMD_TAG_FIFO_CNT12(x) ((x) << S_WR_CMD_TAG_FIFO_CNT12) 
+#define G_WR_CMD_TAG_FIFO_CNT12(x) (((x) >> S_WR_CMD_TAG_FIFO_CNT12) & M_WR_CMD_TAG_FIFO_CNT12) + +#define S_WR_DATA_512B_FIFO_CNT12 18 +#define M_WR_DATA_512B_FIFO_CNT12 0xffU +#define V_WR_DATA_512B_FIFO_CNT12(x) ((x) << S_WR_DATA_512B_FIFO_CNT12) +#define G_WR_DATA_512B_FIFO_CNT12(x) (((x) >> S_WR_DATA_512B_FIFO_CNT12) & M_WR_DATA_512B_FIFO_CNT12) + +#define S_RD_DATA_ALIGN_FSM12 17 +#define V_RD_DATA_ALIGN_FSM12(x) ((x) << S_RD_DATA_ALIGN_FSM12) +#define F_RD_DATA_ALIGN_FSM12 V_RD_DATA_ALIGN_FSM12(1U) + +#define S_RD_DATA_FETCH_FSM12 16 +#define V_RD_DATA_FETCH_FSM12(x) ((x) << S_RD_DATA_FETCH_FSM12) +#define F_RD_DATA_FETCH_FSM12 V_RD_DATA_FETCH_FSM12(1U) + +#define S_COHERENCY_TX_FSM12 15 +#define V_COHERENCY_TX_FSM12(x) ((x) << S_COHERENCY_TX_FSM12) +#define F_COHERENCY_TX_FSM12 V_COHERENCY_TX_FSM12(1U) + +#define S_COHERENCY_RX_FSM12 14 +#define V_COHERENCY_RX_FSM12(x) ((x) << S_COHERENCY_RX_FSM12) +#define F_COHERENCY_RX_FSM12 V_COHERENCY_RX_FSM12(1U) + +#define S_ARB_REQ_FSM12 13 +#define V_ARB_REQ_FSM12(x) ((x) << S_ARB_REQ_FSM12) +#define F_ARB_REQ_FSM12 V_ARB_REQ_FSM12(1U) + +#define S_CMD_SPLIT_FSM12 10 +#define M_CMD_SPLIT_FSM12 0x7U +#define V_CMD_SPLIT_FSM12(x) ((x) << S_CMD_SPLIT_FSM12) +#define G_CMD_SPLIT_FSM12(x) (((x) >> S_CMD_SPLIT_FSM12) & M_CMD_SPLIT_FSM12) + +#define A_MA_TARGET_0_ARBITER_INTERFACE_INTERNAL_REG1 0xe5a0 + +#define S_RD_CMD_TAG_FIFO_CNT0 8 +#define M_RD_CMD_TAG_FIFO_CNT0 0xffU +#define V_RD_CMD_TAG_FIFO_CNT0(x) ((x) << S_RD_CMD_TAG_FIFO_CNT0) +#define G_RD_CMD_TAG_FIFO_CNT0(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT0) & M_RD_CMD_TAG_FIFO_CNT0) + +#define S_RD_DATA_FIFO_CNT0 0 +#define M_RD_DATA_FIFO_CNT0 0xffU +#define V_RD_DATA_FIFO_CNT0(x) ((x) << S_RD_DATA_FIFO_CNT0) +#define G_RD_DATA_FIFO_CNT0(x) (((x) >> S_RD_DATA_FIFO_CNT0) & M_RD_DATA_FIFO_CNT0) + +#define A_MA_TARGET_1_ARBITER_INTERFACE_INTERNAL_REG1 0xe5c0 + +#define S_RD_CMD_TAG_FIFO_CNT1 8 +#define M_RD_CMD_TAG_FIFO_CNT1 0xffU +#define V_RD_CMD_TAG_FIFO_CNT1(x) ((x) << 
S_RD_CMD_TAG_FIFO_CNT1) +#define G_RD_CMD_TAG_FIFO_CNT1(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT1) & M_RD_CMD_TAG_FIFO_CNT1) + +#define S_RD_DATA_FIFO_CNT1 0 +#define M_RD_DATA_FIFO_CNT1 0xffU +#define V_RD_DATA_FIFO_CNT1(x) ((x) << S_RD_DATA_FIFO_CNT1) +#define G_RD_DATA_FIFO_CNT1(x) (((x) >> S_RD_DATA_FIFO_CNT1) & M_RD_DATA_FIFO_CNT1) + +#define A_MA_TARGET_2_ARBITER_INTERFACE_INTERNAL_REG1 0xe5e0 + +#define S_RD_CMD_TAG_FIFO_CNT2 8 +#define M_RD_CMD_TAG_FIFO_CNT2 0xffU +#define V_RD_CMD_TAG_FIFO_CNT2(x) ((x) << S_RD_CMD_TAG_FIFO_CNT2) +#define G_RD_CMD_TAG_FIFO_CNT2(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT2) & M_RD_CMD_TAG_FIFO_CNT2) + +#define S_RD_DATA_FIFO_CNT2 0 +#define M_RD_DATA_FIFO_CNT2 0xffU +#define V_RD_DATA_FIFO_CNT2(x) ((x) << S_RD_DATA_FIFO_CNT2) +#define G_RD_DATA_FIFO_CNT2(x) (((x) >> S_RD_DATA_FIFO_CNT2) & M_RD_DATA_FIFO_CNT2) + +#define A_MA_TARGET_3_ARBITER_INTERFACE_INTERNAL_REG1 0xe600 + +#define S_RD_CMD_TAG_FIFO_CNT3 8 +#define M_RD_CMD_TAG_FIFO_CNT3 0xffU +#define V_RD_CMD_TAG_FIFO_CNT3(x) ((x) << S_RD_CMD_TAG_FIFO_CNT3) +#define G_RD_CMD_TAG_FIFO_CNT3(x) (((x) >> S_RD_CMD_TAG_FIFO_CNT3) & M_RD_CMD_TAG_FIFO_CNT3) + +#define S_RD_DATA_FIFO_CNT3 0 +#define M_RD_DATA_FIFO_CNT3 0xffU +#define V_RD_DATA_FIFO_CNT3(x) ((x) << S_RD_DATA_FIFO_CNT3) +#define G_RD_DATA_FIFO_CNT3(x) (((x) >> S_RD_DATA_FIFO_CNT3) & M_RD_DATA_FIFO_CNT3) + +#define A_MA_SGE_THREAD_0_CLNT_EXP_WR_CYC_CNT_LO 0xe640 +#define A_MA_SGE_THREAD_1_CLNT_EXP_WR_CYC_CNT_LO 0xe660 +#define A_MA_ULP_TX_CLNT_EXP_WR_CYC_CNT_LO 0xe680 +#define A_MA_ULP_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe6a0 +#define A_MA_ULP_TX_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe6c0 +#define A_MA_TP_THREAD_0_CLNT_EXP_WR_CYC_CNT_LO 0xe6e0 +#define A_MA_TP_THREAD_1_CLNT_EXP_WR_CYC_CNT_LO 0xe700 +#define A_MA_LE_CLNT_EXP_WR_CYC_CNT_LO 0xe720 +#define A_MA_CIM_CLNT_EXP_WR_CYC_CNT_LO 0xe740 +#define A_MA_PCIE_CLNT_EXP_WR_CYC_CNT_LO 0xe760 +#define A_MA_PM_TX_CLNT_EXP_WR_CYC_CNT_LO 0xe780 +#define A_MA_PM_RX_CLNT_EXP_WR_CYC_CNT_LO 0xe7a0 +#define 
A_MA_HMA_CLNT_EXP_WR_CYC_CNT_LO 0xe7c0 +#define A_MA_EDRAM0_WR_REQ_CNT_HI 0xe800 +#define A_MA_EDRAM0_WR_REQ_CNT_LO 0xe820 +#define A_MA_EDRAM1_WR_REQ_CNT_HI 0xe840 +#define A_MA_EDRAM1_WR_REQ_CNT_LO 0xe860 +#define A_MA_EXT_MEMORY0_WR_REQ_CNT_HI 0xe880 +#define A_MA_EXT_MEMORY0_WR_REQ_CNT_LO 0xe8a0 +#define A_MA_EXT_MEMORY1_WR_REQ_CNT_HI 0xe8c0 +#define A_MA_EXT_MEMORY1_WR_REQ_CNT_LO 0xe8e0 +#define A_MA_EDRAM0_RD_REQ_CNT_HI 0xe900 +#define A_MA_EDRAM0_RD_REQ_CNT_LO 0xe920 +#define A_MA_EDRAM1_RD_REQ_CNT_HI 0xe940 +#define A_MA_EDRAM1_RD_REQ_CNT_LO 0xe960 +#define A_MA_EXT_MEMORY0_RD_REQ_CNT_HI 0xe980 +#define A_MA_EXT_MEMORY0_RD_REQ_CNT_LO 0xe9a0 +#define A_MA_EXT_MEMORY1_RD_REQ_CNT_HI 0xe9c0 +#define A_MA_EXT_MEMORY1_RD_REQ_CNT_LO 0xe9e0 +#define A_MA_SGE_THREAD_0_CLNT_ACT_RD_CYC_CNT_HI 0xec00 +#define A_MA_SGE_THREAD_0_CLNT_ACT_RD_CYC_CNT_LO 0xec20 +#define A_MA_SGE_THREAD_1_CLNT_ACT_RD_CYC_CNT_HI 0xec40 +#define A_MA_SGE_THREAD_1_CLNT_ACT_RD_CYC_CNT_LO 0xec60 +#define A_MA_ULP_TX_CLNT_ACT_RD_CYC_CNT_HI 0xec80 +#define A_MA_ULP_TX_CLNT_ACT_RD_CYC_CNT_LO 0xeca0 +#define A_MA_ULP_RX_CLNT_ACT_RD_CYC_CNT_HI 0xecc0 +#define A_MA_ULP_RX_CLNT_ACT_RD_CYC_CNT_LO 0xece0 +#define A_MA_ULP_TX_RX_CLNT_ACT_RD_CYC_CNT_HI 0xed00 +#define A_MA_ULP_TX_RX_CLNT_ACT_RD_CYC_CNT_LO 0xed20 +#define A_MA_TP_THREAD_0_CLNT_ACT_RD_CYC_CNT_HI 0xed40 +#define A_MA_TP_THREAD_0_CLNT_ACT_RD_CYC_CNT_LO 0xed60 +#define A_MA_TP_THREAD_1_CLNT_ACT_RD_CYC_CNT_HI 0xed80 +#define A_MA_TP_THREAD_1_CLNT_ACT_RD_CYC_CNT_LO 0xeda0 +#define A_MA_LE_CLNT_ACT_RD_CYC_CNT_HI 0xedc0 +#define A_MA_LE_CLNT_ACT_RD_CYC_CNT_LO 0xede0 +#define A_MA_CIM_CLNT_ACT_RD_CYC_CNT_HI 0xee00 +#define A_MA_CIM_CLNT_ACT_RD_CYC_CNT_LO 0xee20 +#define A_MA_PCIE_CLNT_ACT_RD_CYC_CNT_HI 0xee40 +#define A_MA_PCIE_CLNT_ACT_RD_CYC_CNT_LO 0xee60 +#define A_MA_PM_TX_CLNT_ACT_RD_CYC_CNT_HI 0xee80 +#define A_MA_PM_TX_CLNT_ACT_RD_CYC_CNT_LO 0xeea0 +#define A_MA_PM_RX_CLNT_ACT_RD_CYC_CNT_HI 0xeec0 +#define A_MA_PM_RX_CLNT_ACT_RD_CYC_CNT_LO 
0xeee0 +#define A_MA_HMA_CLNT_ACT_RD_CYC_CNT_HI 0xef00 +#define A_MA_HMA_CLNT_ACT_RD_CYC_CNT_LO 0xef20 +#define A_MA_PM_TX_RD_THROTTLE_STATUS 0xf000 + +#define S_PTMAXTRANS 16 +#define V_PTMAXTRANS(x) ((x) << S_PTMAXTRANS) +#define F_PTMAXTRANS V_PTMAXTRANS(1U) + +#define S_PTFLITCNT 0 +#define M_PTFLITCNT 0xffU +#define V_PTFLITCNT(x) ((x) << S_PTFLITCNT) +#define G_PTFLITCNT(x) (((x) >> S_PTFLITCNT) & M_PTFLITCNT) + +#define A_MA_PM_RX_RD_THROTTLE_STATUS 0xf020 + +#define S_PRMAXTRANS 16 +#define V_PRMAXTRANS(x) ((x) << S_PRMAXTRANS) +#define F_PRMAXTRANS V_PRMAXTRANS(1U) + +#define S_PRFLITCNT 0 +#define M_PRFLITCNT 0xffU +#define V_PRFLITCNT(x) ((x) << S_PRFLITCNT) +#define G_PRFLITCNT(x) (((x) >> S_PRFLITCNT) & M_PRFLITCNT) /* registers for module EDC_0 */ #define EDC_0_BASE_ADDR 0x7900 @@ -12567,6 +20585,14 @@ #define V_OBQSGERX0PARERR(x) ((x) << S_OBQSGERX0PARERR) #define F_OBQSGERX0PARERR V_OBQSGERX0PARERR(1U) +#define S_PCIE2CIMINTFPARERR 29 +#define V_PCIE2CIMINTFPARERR(x) ((x) << S_PCIE2CIMINTFPARERR) +#define F_PCIE2CIMINTFPARERR V_PCIE2CIMINTFPARERR(1U) + +#define S_IBQPCIEPARERR 12 +#define V_IBQPCIEPARERR(x) ((x) << S_IBQPCIEPARERR) +#define F_IBQPCIEPARERR V_IBQPCIEPARERR(1U) + #define A_CIM_HOST_INT_CAUSE 0x7b2c #define S_TIEQOUTPARERRINT 20 @@ -12895,6 +20921,10 @@ #define V_QUEFULLTHRSH(x) ((x) << S_QUEFULLTHRSH) #define G_QUEFULLTHRSH(x) (((x) >> S_QUEFULLTHRSH) & M_QUEFULLTHRSH) +#define S_CIMQ1KEN 30 +#define V_CIMQ1KEN(x) ((x) << S_CIMQ1KEN) +#define F_CIMQ1KEN V_CIMQ1KEN(1U) + #define A_CIM_HOST_ACC_CTRL 0x7b50 #define S_HOSTBUSY 17 @@ -13111,6 +21141,11 @@ #define V_DADDRTIMEOUT(x) ((x) << S_DADDRTIMEOUT) #define G_DADDRTIMEOUT(x) (((x) >> S_DADDRTIMEOUT) & M_DADDRTIMEOUT) +#define S_DADDRTIMEOUTTYPE 0 +#define M_DADDRTIMEOUTTYPE 0x3U +#define V_DADDRTIMEOUTTYPE(x) ((x) << S_DADDRTIMEOUTTYPE) +#define G_DADDRTIMEOUTTYPE(x) (((x) >> S_DADDRTIMEOUTTYPE) & M_DADDRTIMEOUTTYPE) + #define A_CIM_DEBUG_ADDR_ILLEGAL 0x7c0c #define S_DADDRILLEGAL 2 
@@ -13118,6 +21153,11 @@ #define V_DADDRILLEGAL(x) ((x) << S_DADDRILLEGAL) #define G_DADDRILLEGAL(x) (((x) >> S_DADDRILLEGAL) & M_DADDRILLEGAL) +#define S_DADDRILLEGALTYPE 0 +#define M_DADDRILLEGALTYPE 0x3U +#define V_DADDRILLEGALTYPE(x) ((x) << S_DADDRILLEGALTYPE) +#define G_DADDRILLEGALTYPE(x) (((x) >> S_DADDRILLEGALTYPE) & M_DADDRILLEGALTYPE) + #define A_CIM_DEBUG_PIF_CAUSE_MASK 0x7c10 #define S_DPIFHOSTMASK 0 @@ -13130,6 +21170,11 @@ #define V_T5_DPIFHOSTMASK(x) ((x) << S_T5_DPIFHOSTMASK) #define G_T5_DPIFHOSTMASK(x) (((x) >> S_T5_DPIFHOSTMASK) & M_T5_DPIFHOSTMASK) +#define S_T6_T5_DPIFHOSTMASK 0 +#define M_T6_T5_DPIFHOSTMASK 0x3fffffffU +#define V_T6_T5_DPIFHOSTMASK(x) ((x) << S_T6_T5_DPIFHOSTMASK) +#define G_T6_T5_DPIFHOSTMASK(x) (((x) >> S_T6_T5_DPIFHOSTMASK) & M_T6_T5_DPIFHOSTMASK) + #define A_CIM_DEBUG_PIF_UPACC_CAUSE_MASK 0x7c14 #define S_DPIFHUPAMASK 0 @@ -13149,6 +21194,11 @@ #define V_T5_DUPMASK(x) ((x) << S_T5_DUPMASK) #define G_T5_DUPMASK(x) (((x) >> S_T5_DUPMASK) & M_T5_DUPMASK) +#define S_T6_T5_DUPMASK 0 +#define M_T6_T5_DUPMASK 0x3fffffffU +#define V_T6_T5_DUPMASK(x) ((x) << S_T6_T5_DUPMASK) +#define G_T6_T5_DUPMASK(x) (((x) >> S_T6_T5_DUPMASK) & M_T6_T5_DUPMASK) + #define A_CIM_DEBUG_UP_UPACC_CAUSE_MASK 0x7c1c #define S_DUPUACCMASK 0 @@ -13169,6 +21219,11 @@ #define V_T5_PERREN(x) ((x) << S_T5_PERREN) #define G_T5_PERREN(x) (((x) >> S_T5_PERREN) & M_T5_PERREN) +#define S_T6_T5_PERREN 0 +#define M_T6_T5_PERREN 0x3fffffffU +#define V_T6_T5_PERREN(x) ((x) << S_T6_T5_PERREN) +#define G_T6_T5_PERREN(x) (((x) >> S_T6_T5_PERREN) & M_T6_T5_PERREN) + #define A_CIM_EEPROM_BUSY_BIT 0x7c28 #define S_EEPROMBUSY 0 @@ -13181,6 +21236,10 @@ #define V_MA_TIMER_ENABLE(x) ((x) << S_MA_TIMER_ENABLE) #define F_MA_TIMER_ENABLE V_MA_TIMER_ENABLE(1U) +#define S_SLOW_TIMER_ENABLE 1 +#define V_SLOW_TIMER_ENABLE(x) ((x) << S_SLOW_TIMER_ENABLE) +#define F_SLOW_TIMER_ENABLE V_SLOW_TIMER_ENABLE(1U) + #define A_CIM_UP_PO_SINGLE_OUTSTANDING 0x7c30 #define 
S_UP_PO_SINGLE_OUTSTANDING 0 @@ -13206,6 +21265,11 @@ #define V_CIM_SGE0_PKT_ERR_CODE(x) ((x) << S_CIM_SGE0_PKT_ERR_CODE) #define G_CIM_SGE0_PKT_ERR_CODE(x) (((x) >> S_CIM_SGE0_PKT_ERR_CODE) & M_CIM_SGE0_PKT_ERR_CODE) +#define S_CIM_PCIE_PKT_ERR_CODE 8 +#define M_CIM_PCIE_PKT_ERR_CODE 0xffU +#define V_CIM_PCIE_PKT_ERR_CODE(x) ((x) << S_CIM_PCIE_PKT_ERR_CODE) +#define G_CIM_PCIE_PKT_ERR_CODE(x) (((x) >> S_CIM_PCIE_PKT_ERR_CODE) & M_CIM_PCIE_PKT_ERR_CODE) + #define A_CIM_IBQ_DBG_WAIT_COUNTER 0x7c40 #define A_CIM_PIO_UP_MST_CFG_SEL 0x7c44 @@ -13241,6 +21305,10 @@ #define V_IBQ_SKID_FIFO_EOP_FLSH_DSBL(x) ((x) << S_IBQ_SKID_FIFO_EOP_FLSH_DSBL) #define F_IBQ_SKID_FIFO_EOP_FLSH_DSBL V_IBQ_SKID_FIFO_EOP_FLSH_DSBL(1U) +#define S_PCIE_OBQ_IF_DISABLE 5 +#define V_PCIE_OBQ_IF_DISABLE(x) ((x) << S_PCIE_OBQ_IF_DISABLE) +#define F_PCIE_OBQ_IF_DISABLE V_PCIE_OBQ_IF_DISABLE(1U) + #define A_CIM_CGEN_GLOBAL 0x7c50 #define S_CGEN_GLOBAL 0 @@ -13414,6 +21482,18 @@ #define V_ETHUPEN(x) ((x) << S_ETHUPEN) #define F_ETHUPEN V_ETHUPEN(1U) +#define S_CXOFFOVERRIDE 3 +#define V_CXOFFOVERRIDE(x) ((x) << S_CXOFFOVERRIDE) +#define F_CXOFFOVERRIDE V_CXOFFOVERRIDE(1U) + +#define S_EGREDROPEN 1 +#define V_EGREDROPEN(x) ((x) << S_EGREDROPEN) +#define F_EGREDROPEN V_EGREDROPEN(1U) + +#define S_CFASTDEMUXEN 0 +#define V_CFASTDEMUXEN(x) ((x) << S_CFASTDEMUXEN) +#define F_CFASTDEMUXEN V_CFASTDEMUXEN(1U) + #define A_TP_OUT_CONFIG 0x7d04 #define S_PORTQFCEN 28 @@ -13509,6 +21589,30 @@ #define V_EVNTAGEN(x) ((x) << S_EVNTAGEN) #define F_EVNTAGEN V_EVNTAGEN(1U) +#define S_CCPLACKMODE 13 +#define V_CCPLACKMODE(x) ((x) << S_CCPLACKMODE) +#define F_CCPLACKMODE V_CCPLACKMODE(1U) + +#define S_RMWHINTENABLE 12 +#define V_RMWHINTENABLE(x) ((x) << S_RMWHINTENABLE) +#define F_RMWHINTENABLE V_RMWHINTENABLE(1U) + +#define S_EV6FLWEN 8 +#define V_EV6FLWEN(x) ((x) << S_EV6FLWEN) +#define F_EV6FLWEN V_EV6FLWEN(1U) + +#define S_EVLANPRIO 6 +#define V_EVLANPRIO(x) ((x) << S_EVLANPRIO) +#define F_EVLANPRIO V_EVLANPRIO(1U) 
+ +#define S_CRXPKTENC 3 +#define V_CRXPKTENC(x) ((x) << S_CRXPKTENC) +#define F_CRXPKTENC V_CRXPKTENC(1U) + +#define S_CRXPKTXT 1 +#define V_CRXPKTXT(x) ((x) << S_CRXPKTXT) +#define F_CRXPKTXT V_CRXPKTXT(1U) + #define A_TP_GLOBAL_CONFIG 0x7d08 #define S_SYNCOOKIEPARAMS 26 @@ -13595,6 +21699,10 @@ #define V_ISSFROMCPLENABLE(x) ((x) << S_ISSFROMCPLENABLE) #define F_ISSFROMCPLENABLE V_ISSFROMCPLENABLE(1U) +#define S_ACTIVEFILTERCOUNTS 22 +#define V_ACTIVEFILTERCOUNTS(x) ((x) << S_ACTIVEFILTERCOUNTS) +#define F_ACTIVEFILTERCOUNTS V_ACTIVEFILTERCOUNTS(1U) + #define A_TP_DB_CONFIG 0x7d0c #define S_DBMAXOPCNT 24 @@ -13879,6 +21987,18 @@ #define V_ENABLEFILTERNAT(x) ((x) << S_ENABLEFILTERNAT) #define F_ENABLEFILTERNAT V_ENABLEFILTERNAT(1U) +#define S_ENABLEFINCHECK 31 +#define V_ENABLEFINCHECK(x) ((x) << S_ENABLEFINCHECK) +#define F_ENABLEFINCHECK V_ENABLEFINCHECK(1U) + +#define S_ENABLEMIBVFPLD 21 +#define V_ENABLEMIBVFPLD(x) ((x) << S_ENABLEMIBVFPLD) +#define F_ENABLEMIBVFPLD V_ENABLEMIBVFPLD(1U) + +#define S_DISABLESEPPSHFLAG 4 +#define V_DISABLESEPPSHFLAG(x) ((x) << S_DISABLESEPPSHFLAG) +#define F_DISABLESEPPSHFLAG V_DISABLESEPPSHFLAG(1U) + #define A_TP_PC_CONFIG2 0x7d4c #define S_ENABLEMTUVFMODE 31 @@ -14176,6 +22296,31 @@ #define V_SETTIMEENABLE(x) ((x) << S_SETTIMEENABLE) #define F_SETTIMEENABLE V_SETTIMEENABLE(1U) +#define S_ECNCNGFIFO 19 +#define V_ECNCNGFIFO(x) ((x) << S_ECNCNGFIFO) +#define F_ECNCNGFIFO V_ECNCNGFIFO(1U) + +#define S_ECNSYNACK 18 +#define V_ECNSYNACK(x) ((x) << S_ECNSYNACK) +#define F_ECNSYNACK V_ECNSYNACK(1U) + +#define S_ECNTHRESH 16 +#define M_ECNTHRESH 0x3U +#define V_ECNTHRESH(x) ((x) << S_ECNTHRESH) +#define G_ECNTHRESH(x) (((x) >> S_ECNTHRESH) & M_ECNTHRESH) + +#define S_ECNMODE 15 +#define V_ECNMODE(x) ((x) << S_ECNMODE) +#define F_ECNMODE V_ECNMODE(1U) + +#define S_ECNMODECWR 14 +#define V_ECNMODECWR(x) ((x) << S_ECNMODECWR) +#define F_ECNMODECWR V_ECNMODECWR(1U) + +#define S_FORCESHOVE 10 +#define V_FORCESHOVE(x) ((x) << S_FORCESHOVE) 
+#define F_FORCESHOVE V_FORCESHOVE(1U) + #define A_TP_PARA_REG1 0x7d64 #define S_INITRWND 16 @@ -14455,6 +22600,14 @@ #define V_ENABLEFRAGCHECK(x) ((x) << S_ENABLEFRAGCHECK) #define F_ENABLEFRAGCHECK V_ENABLEFRAGCHECK(1U) +#define S_ENABLEFCOECHECK 6 +#define V_ENABLEFCOECHECK(x) ((x) << S_ENABLEFCOECHECK) +#define F_ENABLEFCOECHECK V_ENABLEFCOECHECK(1U) + +#define S_ENABLERDMAFIX 1 +#define V_ENABLERDMAFIX(x) ((x) << S_ENABLERDMAFIX) +#define F_ENABLERDMAFIX V_ENABLERDMAFIX(1U) + #define A_TP_PARA_REG6 0x7d78 #define S_TXPDUSIZEADJ 24 @@ -14551,6 +22704,14 @@ #define V_DISABLEPDUACK(x) ((x) << S_DISABLEPDUACK) #define F_DISABLEPDUACK V_DISABLEPDUACK(1U) +#define S_TXTCAMKEY 22 +#define V_TXTCAMKEY(x) ((x) << S_TXTCAMKEY) +#define F_TXTCAMKEY V_TXTCAMKEY(1U) + +#define S_ENABLECBYP 21 +#define V_ENABLECBYP(x) ((x) << S_ENABLECBYP) +#define F_ENABLECBYP V_ENABLECBYP(1U) + #define A_TP_PARA_REG7 0x7d7c #define S_PMMAXXFERLEN1 16 @@ -14600,6 +22761,20 @@ #define V_ENGINELATENCYBASE(x) ((x) << S_ENGINELATENCYBASE) #define G_ENGINELATENCYBASE(x) (((x) >> S_ENGINELATENCYBASE) & M_ENGINELATENCYBASE) +#define A_TP_PARA_REG8 0x7d84 + +#define S_ECNACKECT 2 +#define V_ECNACKECT(x) ((x) << S_ECNACKECT) +#define F_ECNACKECT V_ECNACKECT(1U) + +#define S_ECNFINECT 1 +#define V_ECNFINECT(x) ((x) << S_ECNFINECT) +#define F_ECNFINECT V_ECNFINECT(1U) + +#define S_ECNSYNECT 0 +#define V_ECNSYNECT(x) ((x) << S_ECNSYNECT) +#define F_ECNSYNECT V_ECNSYNECT(1U) + #define A_TP_ERR_CONFIG 0x7d8c #define S_TNLERRORPING 30 @@ -14714,6 +22889,22 @@ #define V_DROPERRORFPMA(x) ((x) << S_DROPERRORFPMA) #define F_DROPERRORFPMA V_DROPERRORFPMA(1U) +#define S_TNLERROROPAQUE 27 +#define V_TNLERROROPAQUE(x) ((x) << S_TNLERROROPAQUE) +#define F_TNLERROROPAQUE V_TNLERROROPAQUE(1U) + +#define S_TNLERRORIP6OPT 26 +#define V_TNLERRORIP6OPT(x) ((x) << S_TNLERRORIP6OPT) +#define F_TNLERRORIP6OPT V_TNLERRORIP6OPT(1U) + +#define S_DROPERROROPAQUE 11 +#define V_DROPERROROPAQUE(x) ((x) << S_DROPERROROPAQUE) 
+#define F_DROPERROROPAQUE V_DROPERROROPAQUE(1U) + +#define S_DROPERRORIP6OPT 10 +#define V_DROPERRORIP6OPT(x) ((x) << S_DROPERRORIP6OPT) +#define F_DROPERRORIP6OPT V_DROPERRORIP6OPT(1U) + #define A_TP_TIMER_RESOLUTION 0x7d90 #define S_TIMERRESOLUTION 16 @@ -14850,6 +23041,11 @@ #define V_KEEPALIVEMAXR2(x) ((x) << S_KEEPALIVEMAXR2) #define G_KEEPALIVEMAXR2(x) (((x) >> S_KEEPALIVEMAXR2) & M_KEEPALIVEMAXR2) +#define S_T6_SYNSHIFTMAX 24 +#define M_T6_SYNSHIFTMAX 0xfU +#define V_T6_SYNSHIFTMAX(x) ((x) << S_T6_SYNSHIFTMAX) +#define G_T6_SYNSHIFTMAX(x) (((x) >> S_T6_SYNSHIFTMAX) & M_T6_SYNSHIFTMAX) + #define A_TP_TM_CONFIG 0x7dc4 #define S_CMTIMERMAXNUM 0 @@ -14955,6 +23151,78 @@ #define V_ULPTYPE0FIELD(x) ((x) << S_ULPTYPE0FIELD) #define G_ULPTYPE0FIELD(x) (((x) >> S_ULPTYPE0FIELD) & M_ULPTYPE0FIELD) +#define S_ULPTYPE7LENGTH 31 +#define V_ULPTYPE7LENGTH(x) ((x) << S_ULPTYPE7LENGTH) +#define F_ULPTYPE7LENGTH V_ULPTYPE7LENGTH(1U) + +#define S_ULPTYPE7OFFSET 28 +#define M_ULPTYPE7OFFSET 0x7U +#define V_ULPTYPE7OFFSET(x) ((x) << S_ULPTYPE7OFFSET) +#define G_ULPTYPE7OFFSET(x) (((x) >> S_ULPTYPE7OFFSET) & M_ULPTYPE7OFFSET) + +#define S_ULPTYPE6LENGTH 27 +#define V_ULPTYPE6LENGTH(x) ((x) << S_ULPTYPE6LENGTH) +#define F_ULPTYPE6LENGTH V_ULPTYPE6LENGTH(1U) + +#define S_ULPTYPE6OFFSET 24 +#define M_ULPTYPE6OFFSET 0x7U +#define V_ULPTYPE6OFFSET(x) ((x) << S_ULPTYPE6OFFSET) +#define G_ULPTYPE6OFFSET(x) (((x) >> S_ULPTYPE6OFFSET) & M_ULPTYPE6OFFSET) + +#define S_ULPTYPE5LENGTH 23 +#define V_ULPTYPE5LENGTH(x) ((x) << S_ULPTYPE5LENGTH) +#define F_ULPTYPE5LENGTH V_ULPTYPE5LENGTH(1U) + +#define S_ULPTYPE5OFFSET 20 +#define M_ULPTYPE5OFFSET 0x7U +#define V_ULPTYPE5OFFSET(x) ((x) << S_ULPTYPE5OFFSET) +#define G_ULPTYPE5OFFSET(x) (((x) >> S_ULPTYPE5OFFSET) & M_ULPTYPE5OFFSET) + +#define S_ULPTYPE4LENGTH 19 +#define V_ULPTYPE4LENGTH(x) ((x) << S_ULPTYPE4LENGTH) +#define F_ULPTYPE4LENGTH V_ULPTYPE4LENGTH(1U) + +#define S_ULPTYPE4OFFSET 16 +#define M_ULPTYPE4OFFSET 0x7U +#define 
V_ULPTYPE4OFFSET(x) ((x) << S_ULPTYPE4OFFSET) +#define G_ULPTYPE4OFFSET(x) (((x) >> S_ULPTYPE4OFFSET) & M_ULPTYPE4OFFSET) + +#define S_ULPTYPE3LENGTH 15 +#define V_ULPTYPE3LENGTH(x) ((x) << S_ULPTYPE3LENGTH) +#define F_ULPTYPE3LENGTH V_ULPTYPE3LENGTH(1U) + +#define S_ULPTYPE3OFFSET 12 +#define M_ULPTYPE3OFFSET 0x7U +#define V_ULPTYPE3OFFSET(x) ((x) << S_ULPTYPE3OFFSET) +#define G_ULPTYPE3OFFSET(x) (((x) >> S_ULPTYPE3OFFSET) & M_ULPTYPE3OFFSET) + +#define S_ULPTYPE2LENGTH 11 +#define V_ULPTYPE2LENGTH(x) ((x) << S_ULPTYPE2LENGTH) +#define F_ULPTYPE2LENGTH V_ULPTYPE2LENGTH(1U) + +#define S_ULPTYPE2OFFSET 8 +#define M_ULPTYPE2OFFSET 0x7U +#define V_ULPTYPE2OFFSET(x) ((x) << S_ULPTYPE2OFFSET) +#define G_ULPTYPE2OFFSET(x) (((x) >> S_ULPTYPE2OFFSET) & M_ULPTYPE2OFFSET) + +#define S_ULPTYPE1LENGTH 7 +#define V_ULPTYPE1LENGTH(x) ((x) << S_ULPTYPE1LENGTH) +#define F_ULPTYPE1LENGTH V_ULPTYPE1LENGTH(1U) + +#define S_ULPTYPE1OFFSET 4 +#define M_ULPTYPE1OFFSET 0x7U +#define V_ULPTYPE1OFFSET(x) ((x) << S_ULPTYPE1OFFSET) +#define G_ULPTYPE1OFFSET(x) (((x) >> S_ULPTYPE1OFFSET) & M_ULPTYPE1OFFSET) + +#define S_ULPTYPE0LENGTH 3 +#define V_ULPTYPE0LENGTH(x) ((x) << S_ULPTYPE0LENGTH) +#define F_ULPTYPE0LENGTH V_ULPTYPE0LENGTH(1U) + +#define S_ULPTYPE0OFFSET 0 +#define M_ULPTYPE0OFFSET 0x7U +#define V_ULPTYPE0OFFSET(x) ((x) << S_ULPTYPE0OFFSET) +#define G_ULPTYPE0OFFSET(x) (((x) >> S_ULPTYPE0OFFSET) & M_ULPTYPE0OFFSET) + #define A_TP_RSS_LKP_TABLE 0x7dec #define S_LKPTBLROWVLD 31 @@ -14976,6 +23244,11 @@ #define V_LKPTBLQUEUE0(x) ((x) << S_LKPTBLQUEUE0) #define G_LKPTBLQUEUE0(x) (((x) >> S_LKPTBLQUEUE0) & M_LKPTBLQUEUE0) +#define S_T6_LKPTBLROWIDX 20 +#define M_T6_LKPTBLROWIDX 0x7ffU +#define V_T6_LKPTBLROWIDX(x) ((x) << S_T6_LKPTBLROWIDX) +#define G_T6_LKPTBLROWIDX(x) (((x) >> S_T6_LKPTBLROWIDX) & M_T6_LKPTBLROWIDX) + #define A_TP_RSS_CONFIG 0x7df0 #define S_TNL4TUPENIPV6 31 @@ -15102,6 +23375,10 @@ #define V_HASHXOR(x) ((x) << S_HASHXOR) #define F_HASHXOR V_HASHXOR(1U) +#define 
S_TNLFCOESID 22 +#define V_TNLFCOESID(x) ((x) << S_TNLFCOESID) +#define F_TNLFCOESID V_TNLFCOESID(1U) + #define A_TP_RSS_CONFIG_TNL 0x7df4 #define S_MASKSIZE 28 @@ -15118,6 +23395,14 @@ #define V_USEWIRECH(x) ((x) << S_USEWIRECH) #define F_USEWIRECH V_USEWIRECH(1U) +#define S_HASHALL 2 +#define V_HASHALL(x) ((x) << S_HASHALL) +#define F_HASHALL V_HASHALL(1U) + +#define S_HASHETH 1 +#define V_HASHETH(x) ((x) << S_HASHETH) +#define F_HASHETH V_HASHETH(1U) + #define A_TP_RSS_CONFIG_OFD 0x7df8 #define S_RRCPLMAPEN 20 @@ -15197,6 +23482,20 @@ #define V_VFFWEN(x) ((x) << S_VFFWEN) #define F_VFFWEN V_VFFWEN(1U) +#define S_KEYWRADDRX 30 +#define M_KEYWRADDRX 0x3U +#define V_KEYWRADDRX(x) ((x) << S_KEYWRADDRX) +#define G_KEYWRADDRX(x) (((x) >> S_KEYWRADDRX) & M_KEYWRADDRX) + +#define S_KEYEXTEND 26 +#define V_KEYEXTEND(x) ((x) << S_KEYEXTEND) +#define F_KEYEXTEND V_KEYEXTEND(1U) + +#define S_T6_VFWRADDR 8 +#define M_T6_VFWRADDR 0xffU +#define V_T6_VFWRADDR(x) ((x) << S_T6_VFWRADDR) +#define G_T6_VFWRADDR(x) (((x) >> S_T6_VFWRADDR) & M_T6_VFWRADDR) + #define A_TP_RSS_CONFIG_CNG 0x7e04 #define S_CHNCOUNT3 31 @@ -15580,6 +23879,10 @@ #define V_CTPOUTPLDFIFOPERR(x) ((x) << S_CTPOUTPLDFIFOPERR) #define F_CTPOUTPLDFIFOPERR V_CTPOUTPLDFIFOPERR(1U) +#define S_SRQTABLEPERR 1 +#define V_SRQTABLEPERR(x) ((x) << S_SRQTABLEPERR) +#define F_SRQTABLEPERR V_SRQTABLEPERR(1U) + #define A_TP_INT_CAUSE 0x7e74 #define A_TP_PER_ENABLE 0x7e78 #define A_TP_FLM_FREE_PS_CNT 0x7e80 @@ -16285,6 +24588,14 @@ #define V_TXMAPCHANNEL0(x) ((x) << S_TXMAPCHANNEL0) #define G_TXMAPCHANNEL0(x) (((x) >> S_TXMAPCHANNEL0) & M_TXMAPCHANNEL0) +#define S_TXLPKCHANNEL1 17 +#define V_TXLPKCHANNEL1(x) ((x) << S_TXLPKCHANNEL1) +#define F_TXLPKCHANNEL1 V_TXLPKCHANNEL1(1U) + +#define S_TXLPKCHANNEL0 16 +#define V_TXLPKCHANNEL0(x) ((x) << S_TXLPKCHANNEL0) +#define F_TXLPKCHANNEL0 V_TXLPKCHANNEL0(1U) + #define A_TP_TX_SCHED_HDR 0x23 #define S_TXMAPHDRCHANNEL7 28 @@ -16642,13 +24953,56 @@ #define V_CH0DEFAULTQUEUE(x) ((x) << 
S_CH0DEFAULTQUEUE) #define G_CH0DEFAULTQUEUE(x) (((x) >> S_CH0DEFAULTQUEUE) & M_CH0DEFAULTQUEUE) +#define S_PRIENABLE 30 +#define V_PRIENABLE(x) ((x) << S_PRIENABLE) +#define F_PRIENABLE V_PRIENABLE(1U) + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF1_CONFIG 0x31 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF2_CONFIG 0x32 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF3_CONFIG 0x33 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF4_CONFIG 0x34 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF5_CONFIG 0x35 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF6_CONFIG 0x36 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF7_CONFIG 0x37 + +#define S_T6_CHNENABLE 29 +#define V_T6_CHNENABLE(x) ((x) << S_T6_CHNENABLE) +#define F_T6_CHNENABLE V_T6_CHNENABLE(1U) + #define A_TP_RSS_PF_MAP 0x38 #define S_LKPIDXSIZE 24 @@ -16838,6 +25192,13 @@ #define V_ETHTYPE0(x) ((x) << S_ETHTYPE0) #define G_ETHTYPE0(x) (((x) >> S_ETHTYPE0) & M_ETHTYPE0) +#define A_TP_VXLAN_HEADER 0x53 + +#define S_VXLANPORT 0 +#define M_VXLANPORT 0xffffU +#define V_VXLANPORT(x) ((x) << S_VXLANPORT) +#define G_VXLANPORT(x) (((x) >> S_VXLANPORT) & M_VXLANPORT) + #define A_TP_CORE_POWER 0x54 #define S_SLEEPRDYVNT 12 @@ -16910,6 +25271,114 @@ #define V_IMMEDIATEEN(x) ((x) << S_IMMEDIATEEN) #define F_IMMEDIATEEN V_IMMEDIATEEN(1U) +#define S_SHAREDRQEN 31 
+#define V_SHAREDRQEN(x) ((x) << S_SHAREDRQEN) +#define F_SHAREDRQEN V_SHAREDRQEN(1U) + +#define S_SHAREDXRC 30 +#define V_SHAREDXRC(x) ((x) << S_SHAREDXRC) +#define F_SHAREDXRC V_SHAREDXRC(1U) + +#define A_TP_FRAG_CONFIG 0x56 + +#define S_TLSMODE 16 +#define M_TLSMODE 0x3U +#define V_TLSMODE(x) ((x) << S_TLSMODE) +#define G_TLSMODE(x) (((x) >> S_TLSMODE) & M_TLSMODE) + +#define S_USERMODE 14 +#define M_USERMODE 0x3U +#define V_USERMODE(x) ((x) << S_USERMODE) +#define G_USERMODE(x) (((x) >> S_USERMODE) & M_USERMODE) + +#define S_FCOEMODE 12 +#define M_FCOEMODE 0x3U +#define V_FCOEMODE(x) ((x) << S_FCOEMODE) +#define G_FCOEMODE(x) (((x) >> S_FCOEMODE) & M_FCOEMODE) + +#define S_IANDPMODE 10 +#define M_IANDPMODE 0x3U +#define V_IANDPMODE(x) ((x) << S_IANDPMODE) +#define G_IANDPMODE(x) (((x) >> S_IANDPMODE) & M_IANDPMODE) + +#define S_RDDPMODE 8 +#define M_RDDPMODE 0x3U +#define V_RDDPMODE(x) ((x) << S_RDDPMODE) +#define G_RDDPMODE(x) (((x) >> S_RDDPMODE) & M_RDDPMODE) + +#define S_IWARPMODE 6 +#define M_IWARPMODE 0x3U +#define V_IWARPMODE(x) ((x) << S_IWARPMODE) +#define G_IWARPMODE(x) (((x) >> S_IWARPMODE) & M_IWARPMODE) + +#define S_ISCSIMODE 4 +#define M_ISCSIMODE 0x3U +#define V_ISCSIMODE(x) ((x) << S_ISCSIMODE) +#define G_ISCSIMODE(x) (((x) >> S_ISCSIMODE) & M_ISCSIMODE) + +#define S_DDPMODE 2 +#define M_DDPMODE 0x3U +#define V_DDPMODE(x) ((x) << S_DDPMODE) +#define G_DDPMODE(x) (((x) >> S_DDPMODE) & M_DDPMODE) + +#define S_PASSMODE 0 +#define M_PASSMODE 0x3U +#define V_PASSMODE(x) ((x) << S_PASSMODE) +#define G_PASSMODE(x) (((x) >> S_PASSMODE) & M_PASSMODE) + +#define A_TP_CMM_CONFIG 0x57 + +#define S_WRCNTIDLE 16 +#define M_WRCNTIDLE 0xffffU +#define V_WRCNTIDLE(x) ((x) << S_WRCNTIDLE) +#define G_WRCNTIDLE(x) (((x) >> S_WRCNTIDLE) & M_WRCNTIDLE) + +#define S_RDTHRESHOLD 8 +#define M_RDTHRESHOLD 0x3fU +#define V_RDTHRESHOLD(x) ((x) << S_RDTHRESHOLD) +#define G_RDTHRESHOLD(x) (((x) >> S_RDTHRESHOLD) & M_RDTHRESHOLD) + +#define S_WRTHRLEVEL2 7 +#define 
V_WRTHRLEVEL2(x) ((x) << S_WRTHRLEVEL2) +#define F_WRTHRLEVEL2 V_WRTHRLEVEL2(1U) + +#define S_WRTHRLEVEL1 6 +#define V_WRTHRLEVEL1(x) ((x) << S_WRTHRLEVEL1) +#define F_WRTHRLEVEL1 V_WRTHRLEVEL1(1U) + +#define S_WRTHRTHRESHEN 5 +#define V_WRTHRTHRESHEN(x) ((x) << S_WRTHRTHRESHEN) +#define F_WRTHRTHRESHEN V_WRTHRTHRESHEN(1U) + +#define S_WRTHRTHRESH 0 +#define M_WRTHRTHRESH 0x1fU +#define V_WRTHRTHRESH(x) ((x) << S_WRTHRTHRESH) +#define G_WRTHRTHRESH(x) (((x) >> S_WRTHRTHRESH) & M_WRTHRTHRESH) + +#define A_TP_VXLAN_CONFIG 0x58 + +#define S_VXLANFLAGS 16 +#define M_VXLANFLAGS 0xffffU +#define V_VXLANFLAGS(x) ((x) << S_VXLANFLAGS) +#define G_VXLANFLAGS(x) (((x) >> S_VXLANFLAGS) & M_VXLANFLAGS) + +#define S_VXLANTYPE 0 +#define M_VXLANTYPE 0xffffU +#define V_VXLANTYPE(x) ((x) << S_VXLANTYPE) +#define G_VXLANTYPE(x) (((x) >> S_VXLANTYPE) & M_VXLANTYPE) + +#define A_TP_NVGRE_CONFIG 0x59 + +#define S_GREFLAGS 16 +#define M_GREFLAGS 0xffffU +#define V_GREFLAGS(x) ((x) << S_GREFLAGS) +#define G_GREFLAGS(x) (((x) >> S_GREFLAGS) & M_GREFLAGS) + +#define S_GRETYPE 0 +#define M_GRETYPE 0xffffU +#define V_GRETYPE(x) ((x) << S_GRETYPE) +#define G_GRETYPE(x) (((x) >> S_GRETYPE) & M_GRETYPE) + #define A_TP_DBG_CLEAR 0x60 #define A_TP_DBG_CORE_HDR0 0x61 @@ -17362,6 +25831,22 @@ #define V_DELDRDY(x) ((x) << S_DELDRDY) #define F_DELDRDY V_DELDRDY(1U) +#define S_T5_ETXBUSY 1 +#define V_T5_ETXBUSY(x) ((x) << S_T5_ETXBUSY) +#define F_T5_ETXBUSY V_T5_ETXBUSY(1U) + +#define S_T5_EPCMDBUSY 0 +#define V_T5_EPCMDBUSY(x) ((x) << S_T5_EPCMDBUSY) +#define F_T5_EPCMDBUSY V_T5_EPCMDBUSY(1U) + +#define S_T6_ETXBUSY 1 +#define V_T6_ETXBUSY(x) ((x) << S_T6_ETXBUSY) +#define F_T6_ETXBUSY V_T6_ETXBUSY(1U) + +#define S_T6_EPCMDBUSY 0 +#define V_T6_EPCMDBUSY(x) ((x) << S_T6_EPCMDBUSY) +#define F_T6_EPCMDBUSY V_T6_EPCMDBUSY(1U) + #define A_TP_DBG_ENG_RES1 0x67 #define S_RXCPLSRDY 31 @@ -17451,6 +25936,10 @@ #define V_RCFDATACMRDY(x) ((x) << S_RCFDATACMRDY) #define F_RCFDATACMRDY V_RCFDATACMRDY(1U) +#define 
S_RXISSSRDY 28 +#define V_RXISSSRDY(x) ((x) << S_RXISSSRDY) +#define F_RXISSSRDY V_RXISSSRDY(1U) + #define A_TP_DBG_ENG_RES2 0x68 #define S_CPLCMDRAW 24 @@ -17611,7 +26100,49 @@ #define V_RXMODXOFF(x) ((x) << S_RXMODXOFF) #define G_RXMODXOFF(x) (((x) >> S_RXMODXOFF) & M_RXMODXOFF) +#define S_T5_RXFIFOCNG 20 +#define M_T5_RXFIFOCNG 0xfU +#define V_T5_RXFIFOCNG(x) ((x) << S_T5_RXFIFOCNG) +#define G_T5_RXFIFOCNG(x) (((x) >> S_T5_RXFIFOCNG) & M_T5_RXFIFOCNG) + +#define S_T5_RXPCMDCNG 14 +#define M_T5_RXPCMDCNG 0x3U +#define V_T5_RXPCMDCNG(x) ((x) << S_T5_RXPCMDCNG) +#define G_T5_RXPCMDCNG(x) (((x) >> S_T5_RXPCMDCNG) & M_T5_RXPCMDCNG) + +#define S_T6_RXFIFOCNG 20 +#define M_T6_RXFIFOCNG 0xfU +#define V_T6_RXFIFOCNG(x) ((x) << S_T6_RXFIFOCNG) +#define G_T6_RXFIFOCNG(x) (((x) >> S_T6_RXFIFOCNG) & M_T6_RXFIFOCNG) + +#define S_T6_RXPCMDCNG 14 +#define M_T6_RXPCMDCNG 0x3U +#define V_T6_RXPCMDCNG(x) ((x) << S_T6_RXPCMDCNG) +#define G_T6_RXPCMDCNG(x) (((x) >> S_T6_RXPCMDCNG) & M_T6_RXPCMDCNG) + #define A_TP_DBG_ERROR_CNT 0x6c +#define A_TP_DBG_CORE_CPL 0x6d + +#define S_CPLCMDOUT3 24 +#define M_CPLCMDOUT3 0xffU +#define V_CPLCMDOUT3(x) ((x) << S_CPLCMDOUT3) +#define G_CPLCMDOUT3(x) (((x) >> S_CPLCMDOUT3) & M_CPLCMDOUT3) + +#define S_CPLCMDOUT2 16 +#define M_CPLCMDOUT2 0xffU +#define V_CPLCMDOUT2(x) ((x) << S_CPLCMDOUT2) +#define G_CPLCMDOUT2(x) (((x) >> S_CPLCMDOUT2) & M_CPLCMDOUT2) + +#define S_CPLCMDOUT1 8 +#define M_CPLCMDOUT1 0xffU +#define V_CPLCMDOUT1(x) ((x) << S_CPLCMDOUT1) +#define G_CPLCMDOUT1(x) (((x) >> S_CPLCMDOUT1) & M_CPLCMDOUT1) + +#define S_CPLCMDOUT0 0 +#define M_CPLCMDOUT0 0xffU +#define V_CPLCMDOUT0(x) ((x) << S_CPLCMDOUT0) +#define G_CPLCMDOUT0(x) (((x) >> S_CPLCMDOUT0) & M_CPLCMDOUT0) + #define A_TP_MIB_DEBUG 0x6f #define S_SRC3 31 @@ -17650,6 +26181,12 @@ #define V_LINENUM0(x) ((x) << S_LINENUM0) #define G_LINENUM0(x) (((x) >> S_LINENUM0) & M_LINENUM0) +#define A_TP_DBG_CACHE_WR_ALL 0x70 +#define A_TP_DBG_CACHE_WR_HIT 0x71 +#define A_TP_DBG_CACHE_RD_ALL 
0x72 +#define A_TP_DBG_CACHE_RD_HIT 0x73 +#define A_TP_DBG_CACHE_MC_REQ 0x74 +#define A_TP_DBG_CACHE_MC_RSP 0x75 #define A_TP_T5_TX_DROP_CNT_CH0 0x120 #define A_TP_T5_TX_DROP_CNT_CH1 0x121 #define A_TP_TX_DROP_CNT_CH2 0x122 @@ -18119,7 +26656,36 @@ #define V_TXFULL(x) ((x) << S_TXFULL) #define F_TXFULL V_TXFULL(1U) +#define S_FIFOGRERXVALID 15 +#define V_FIFOGRERXVALID(x) ((x) << S_FIFOGRERXVALID) +#define F_FIFOGRERXVALID V_FIFOGRERXVALID(1U) + +#define S_FIFOGRERXREADY 14 +#define V_FIFOGRERXREADY(x) ((x) << S_FIFOGRERXREADY) +#define F_FIFOGRERXREADY V_FIFOGRERXREADY(1U) + +#define S_FIFOGRERXSOCP 13 +#define V_FIFOGRERXSOCP(x) ((x) << S_FIFOGRERXSOCP) +#define F_FIFOGRERXSOCP V_FIFOGRERXSOCP(1U) + +#define S_T6_ESTATIC4 12 +#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4) +#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U) + +#define S_TXFULL_ESIDE0 0 +#define V_TXFULL_ESIDE0(x) ((x) << S_TXFULL_ESIDE0) +#define F_TXFULL_ESIDE0 V_TXFULL_ESIDE0(1U) + #define A_TP_DBG_ESIDE_DISP1 0x137 + +#define S_T6_ESTATIC4 12 +#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4) +#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U) + +#define S_TXFULL_ESIDE1 0 +#define V_TXFULL_ESIDE1(x) ((x) << S_TXFULL_ESIDE1) +#define F_TXFULL_ESIDE1 V_TXFULL_ESIDE1(1U) + #define A_TP_MAC_MATCH_MAP0 0x138 #define S_MAPVALUEWR 16 @@ -18148,7 +26714,25 @@ #define G_MAPVALUERD(x) (((x) >> S_MAPVALUERD) & M_MAPVALUERD) #define A_TP_DBG_ESIDE_DISP2 0x13a + +#define S_T6_ESTATIC4 12 +#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4) +#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U) + +#define S_TXFULL_ESIDE2 0 +#define V_TXFULL_ESIDE2(x) ((x) << S_TXFULL_ESIDE2) +#define F_TXFULL_ESIDE2 V_TXFULL_ESIDE2(1U) + #define A_TP_DBG_ESIDE_DISP3 0x13b + +#define S_T6_ESTATIC4 12 +#define V_T6_ESTATIC4(x) ((x) << S_T6_ESTATIC4) +#define F_T6_ESTATIC4 V_T6_ESTATIC4(1U) + +#define S_TXFULL_ESIDE3 0 +#define V_TXFULL_ESIDE3(x) ((x) << S_TXFULL_ESIDE3) +#define F_TXFULL_ESIDE3 V_TXFULL_ESIDE3(1U) + #define A_TP_DBG_ESIDE_HDR0 0x13c #define 
S_TCPSOPCNT 28 @@ -18296,6 +26880,10 @@ #define V_FRAG_LEN_MOD8_COMPAT(x) ((x) << S_FRAG_LEN_MOD8_COMPAT) #define F_FRAG_LEN_MOD8_COMPAT V_FRAG_LEN_MOD8_COMPAT(1U) +#define S_USE_ENC_IDX 13 +#define V_USE_ENC_IDX(x) ((x) << S_USE_ENC_IDX) +#define F_USE_ENC_IDX V_USE_ENC_IDX(1U) + #define A_TP_TX_DROP_CFG_CH2 0x142 #define A_TP_TX_DROP_CFG_CH3 0x143 #define A_TP_EGRESS_CONFIG 0x145 @@ -18416,6 +27004,14 @@ #define V_TCPOPTTXFULL(x) ((x) << S_TCPOPTTXFULL) #define F_TCPOPTTXFULL V_TCPOPTTXFULL(1U) +#define S_PKTATTRSRDY 3 +#define V_PKTATTRSRDY(x) ((x) << S_PKTATTRSRDY) +#define F_PKTATTRSRDY V_PKTATTRSRDY(1U) + +#define S_PKTATTRDRDY 2 +#define V_PKTATTRDRDY(x) ((x) << S_PKTATTRDRDY) +#define F_PKTATTRDRDY V_PKTATTRDRDY(1U) + #define A_TP_DBG_ESIDE_DEMUX 0x149 #define S_EALLDONE 28 @@ -18683,6 +27279,11 @@ #define V_ETXFULL(x) ((x) << S_ETXFULL) #define G_ETXFULL(x) (((x) >> S_ETXFULL) & M_ETXFULL) +#define S_TXERRORCNT 8 +#define M_TXERRORCNT 0xffffffU +#define V_TXERRORCNT(x) ((x) << S_TXERRORCNT) +#define G_TXERRORCNT(x) (((x) >> S_TXERRORCNT) & M_TXERRORCNT) + #define A_TP_ESIDE_SVID_MASK 0x151 #define A_TP_ESIDE_DVID_MASK 0x152 #define A_TP_ESIDE_ALIGN_MASK 0x153 @@ -18862,6 +27463,29 @@ #define V_OPT_PARSER_COOKIE_CHANNEL0(x) ((x) << S_OPT_PARSER_COOKIE_CHANNEL0) #define G_OPT_PARSER_COOKIE_CHANNEL0(x) (((x) >> S_OPT_PARSER_COOKIE_CHANNEL0) & M_OPT_PARSER_COOKIE_CHANNEL0) +#define A_TP_DBG_ESIDE_DEMUX_WAIT0 0x158 +#define A_TP_DBG_ESIDE_DEMUX_WAIT1 0x159 +#define A_TP_DBG_ESIDE_DEMUX_CNT0 0x15a +#define A_TP_DBG_ESIDE_DEMUX_CNT1 0x15b +#define A_TP_ESIDE_CONFIG 0x160 + +#define S_VNI_EN 26 +#define V_VNI_EN(x) ((x) << S_VNI_EN) +#define F_VNI_EN V_VNI_EN(1U) + +#define S_ENC_RX_EN 25 +#define V_ENC_RX_EN(x) ((x) << S_ENC_RX_EN) +#define F_ENC_RX_EN V_ENC_RX_EN(1U) + +#define S_TNL_LKP_INNER_SEL 24 +#define V_TNL_LKP_INNER_SEL(x) ((x) << S_TNL_LKP_INNER_SEL) +#define F_TNL_LKP_INNER_SEL V_TNL_LKP_INNER_SEL(1U) + +#define S_ROCEV2UDPPORT 0 +#define 
M_ROCEV2UDPPORT 0xffffU +#define V_ROCEV2UDPPORT(x) ((x) << S_ROCEV2UDPPORT) +#define G_ROCEV2UDPPORT(x) (((x) >> S_ROCEV2UDPPORT) & M_ROCEV2UDPPORT) + #define A_TP_DBG_CSIDE_RX0 0x230 #define S_CRXSOPCNT 28 @@ -19274,14 +27898,30 @@ #define V_CMD_SEL(x) ((x) << S_CMD_SEL) #define F_CMD_SEL V_CMD_SEL(1U) +#define S_T5_TXFULL 31 +#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL) +#define F_T5_TXFULL V_T5_TXFULL(1U) + #define S_CPL5RXFULL 26 #define V_CPL5RXFULL(x) ((x) << S_CPL5RXFULL) #define F_CPL5RXFULL V_CPL5RXFULL(1U) +#define S_T5_PLD_RXZEROP_SRDY 25 +#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY) +#define F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U) + #define S_PLD2XRXVALID 23 #define V_PLD2XRXVALID(x) ((x) << S_PLD2XRXVALID) #define F_PLD2XRXVALID V_PLD2XRXVALID(1U) +#define S_T5_DDP_SRDY 22 +#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY) +#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U) + +#define S_T5_DDP_DRDY 21 +#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY) +#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U) + #define S_DDPSTATE 16 #define M_DDPSTATE 0x1fU #define V_DDPSTATE(x) ((x) << S_DDPSTATE) @@ -19318,7 +27958,56 @@ #define V_TXFULL2X(x) ((x) << S_TXFULL2X) #define F_TXFULL2X V_TXFULL2X(1U) +#define S_T6_TXFULL 31 +#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL) +#define F_T6_TXFULL V_T6_TXFULL(1U) + +#define S_T6_PLD_RXZEROP_SRDY 25 +#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY) +#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U) + +#define S_T6_DDP_SRDY 22 +#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY) +#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U) + +#define S_T6_DDP_DRDY 21 +#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY) +#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U) + #define A_TP_DBG_CSIDE_DISP1 0x23b + +#define S_T5_TXFULL 31 +#define V_T5_TXFULL(x) ((x) << S_T5_TXFULL) +#define F_T5_TXFULL V_T5_TXFULL(1U) + +#define S_T5_PLD_RXZEROP_SRDY 25 +#define V_T5_PLD_RXZEROP_SRDY(x) ((x) << S_T5_PLD_RXZEROP_SRDY) +#define 
F_T5_PLD_RXZEROP_SRDY V_T5_PLD_RXZEROP_SRDY(1U) + +#define S_T5_DDP_SRDY 22 +#define V_T5_DDP_SRDY(x) ((x) << S_T5_DDP_SRDY) +#define F_T5_DDP_SRDY V_T5_DDP_SRDY(1U) + +#define S_T5_DDP_DRDY 21 +#define V_T5_DDP_DRDY(x) ((x) << S_T5_DDP_DRDY) +#define F_T5_DDP_DRDY V_T5_DDP_DRDY(1U) + +#define S_T6_TXFULL 31 +#define V_T6_TXFULL(x) ((x) << S_T6_TXFULL) +#define F_T6_TXFULL V_T6_TXFULL(1U) + +#define S_T6_PLD_RXZEROP_SRDY 25 +#define V_T6_PLD_RXZEROP_SRDY(x) ((x) << S_T6_PLD_RXZEROP_SRDY) +#define F_T6_PLD_RXZEROP_SRDY V_T6_PLD_RXZEROP_SRDY(1U) + +#define S_T6_DDP_SRDY 22 +#define V_T6_DDP_SRDY(x) ((x) << S_T6_DDP_SRDY) +#define F_T6_DDP_SRDY V_T6_DDP_SRDY(1U) + +#define S_T6_DDP_DRDY 21 +#define V_T6_DDP_DRDY(x) ((x) << S_T6_DDP_DRDY) +#define F_T6_DDP_DRDY V_T6_DDP_DRDY(1U) + #define A_TP_DBG_CSIDE_DDP0 0x23c #define S_DDPMSGLATEST7 28 @@ -19525,6 +28214,10 @@ #define V_ATOMICCMDEN(x) ((x) << S_ATOMICCMDEN) #define F_ATOMICCMDEN V_ATOMICCMDEN(1U) +#define S_ISCSICMDMODE 28 +#define V_ISCSICMDMODE(x) ((x) << S_ISCSICMDMODE) +#define F_ISCSICMDMODE V_ISCSICMDMODE(1U) + #define A_TP_CSPI_POWER 0x243 #define S_GATECHNTX3 11 @@ -19614,6 +28307,11 @@ #define V_CRXFULL3(x) ((x) << S_CRXFULL3) #define F_CRXFULL3 V_CRXFULL3(1U) +#define S_T5_CPRSSTATE3 24 +#define M_T5_CPRSSTATE3 0xfU +#define V_T5_CPRSSTATE3(x) ((x) << S_T5_CPRSSTATE3) +#define G_T5_CPRSSTATE3(x) (((x) >> S_T5_CPRSSTATE3) & M_T5_CPRSSTATE3) + #define S_C4TUPBUSY2 23 #define V_C4TUPBUSY2(x) ((x) << S_C4TUPBUSY2) #define F_C4TUPBUSY2 V_C4TUPBUSY2(1U) @@ -19630,6 +28328,11 @@ #define V_CRXFULL2(x) ((x) << S_CRXFULL2) #define F_CRXFULL2 V_CRXFULL2(1U) +#define S_T5_CPRSSTATE2 16 +#define M_T5_CPRSSTATE2 0xfU +#define V_T5_CPRSSTATE2(x) ((x) << S_T5_CPRSSTATE2) +#define G_T5_CPRSSTATE2(x) (((x) >> S_T5_CPRSSTATE2) & M_T5_CPRSSTATE2) + #define S_C4TUPBUSY1 15 #define V_C4TUPBUSY1(x) ((x) << S_C4TUPBUSY1) #define F_C4TUPBUSY1 V_C4TUPBUSY1(1U) @@ -19646,6 +28349,11 @@ #define V_CRXFULL1(x) ((x) << S_CRXFULL1) 
#define F_CRXFULL1 V_CRXFULL1(1U) +#define S_T5_CPRSSTATE1 8 +#define M_T5_CPRSSTATE1 0xfU +#define V_T5_CPRSSTATE1(x) ((x) << S_T5_CPRSSTATE1) +#define G_T5_CPRSSTATE1(x) (((x) >> S_T5_CPRSSTATE1) & M_T5_CPRSSTATE1) + #define S_C4TUPBUSY0 7 #define V_C4TUPBUSY0(x) ((x) << S_C4TUPBUSY0) #define F_C4TUPBUSY0 V_C4TUPBUSY0(1U) @@ -19662,6 +28370,31 @@ #define V_CRXFULL0(x) ((x) << S_CRXFULL0) #define F_CRXFULL0 V_CRXFULL0(1U) +#define S_T5_CPRSSTATE0 0 +#define M_T5_CPRSSTATE0 0xfU +#define V_T5_CPRSSTATE0(x) ((x) << S_T5_CPRSSTATE0) +#define G_T5_CPRSSTATE0(x) (((x) >> S_T5_CPRSSTATE0) & M_T5_CPRSSTATE0) + +#define S_T6_CPRSSTATE3 24 +#define M_T6_CPRSSTATE3 0xfU +#define V_T6_CPRSSTATE3(x) ((x) << S_T6_CPRSSTATE3) +#define G_T6_CPRSSTATE3(x) (((x) >> S_T6_CPRSSTATE3) & M_T6_CPRSSTATE3) + +#define S_T6_CPRSSTATE2 16 +#define M_T6_CPRSSTATE2 0xfU +#define V_T6_CPRSSTATE2(x) ((x) << S_T6_CPRSSTATE2) +#define G_T6_CPRSSTATE2(x) (((x) >> S_T6_CPRSSTATE2) & M_T6_CPRSSTATE2) + +#define S_T6_CPRSSTATE1 8 +#define M_T6_CPRSSTATE1 0xfU +#define V_T6_CPRSSTATE1(x) ((x) << S_T6_CPRSSTATE1) +#define G_T6_CPRSSTATE1(x) (((x) >> S_T6_CPRSSTATE1) & M_T6_CPRSSTATE1) + +#define S_T6_CPRSSTATE0 0 +#define M_T6_CPRSSTATE0 0xfU +#define V_T6_CPRSSTATE0(x) ((x) << S_T6_CPRSSTATE0) +#define G_T6_CPRSSTATE0(x) (((x) >> S_T6_CPRSSTATE0) & M_T6_CPRSSTATE0) + #define A_TP_DBG_CSIDE_DEMUX 0x247 #define S_CALLDONE 28 @@ -19849,6 +28582,50 @@ #define V_ERRVALID0(x) ((x) << S_ERRVALID0) #define F_ERRVALID0 V_ERRVALID0(1U) +#define A_TP_DBG_CSIDE_TRACE_CNT 0x24a + +#define S_TRCSOPCNT 24 +#define M_TRCSOPCNT 0xffU +#define V_TRCSOPCNT(x) ((x) << S_TRCSOPCNT) +#define G_TRCSOPCNT(x) (((x) >> S_TRCSOPCNT) & M_TRCSOPCNT) + +#define S_TRCEOPCNT 16 +#define M_TRCEOPCNT 0xffU +#define V_TRCEOPCNT(x) ((x) << S_TRCEOPCNT) +#define G_TRCEOPCNT(x) (((x) >> S_TRCEOPCNT) & M_TRCEOPCNT) + +#define S_TRCFLTHIT 12 +#define M_TRCFLTHIT 0xfU +#define V_TRCFLTHIT(x) ((x) << S_TRCFLTHIT) +#define G_TRCFLTHIT(x) 
(((x) >> S_TRCFLTHIT) & M_TRCFLTHIT) + +#define S_TRCRNTPKT 8 +#define M_TRCRNTPKT 0xfU +#define V_TRCRNTPKT(x) ((x) << S_TRCRNTPKT) +#define G_TRCRNTPKT(x) (((x) >> S_TRCRNTPKT) & M_TRCRNTPKT) + +#define S_TRCPKTLEN 0 +#define M_TRCPKTLEN 0xffU +#define V_TRCPKTLEN(x) ((x) << S_TRCPKTLEN) +#define G_TRCPKTLEN(x) (((x) >> S_TRCPKTLEN) & M_TRCPKTLEN) + +#define A_TP_DBG_CSIDE_TRACE_RSS 0x24b +#define A_TP_VLN_CONFIG 0x24c + +#define S_ETHTYPEQINQ 16 +#define M_ETHTYPEQINQ 0xffffU +#define V_ETHTYPEQINQ(x) ((x) << S_ETHTYPEQINQ) +#define G_ETHTYPEQINQ(x) (((x) >> S_ETHTYPEQINQ) & M_ETHTYPEQINQ) + +#define S_ETHTYPEVLAN 0 +#define M_ETHTYPEVLAN 0xffffU +#define V_ETHTYPEVLAN(x) ((x) << S_ETHTYPEVLAN) +#define G_ETHTYPEVLAN(x) (((x) >> S_ETHTYPEVLAN) & M_ETHTYPEVLAN) + +#define A_TP_DBG_CSIDE_ARBIT_WAIT0 0x24d +#define A_TP_DBG_CSIDE_ARBIT_WAIT1 0x24e +#define A_TP_DBG_CSIDE_ARBIT_CNT0 0x24f +#define A_TP_DBG_CSIDE_ARBIT_CNT1 0x250 #define A_TP_FIFO_CONFIG 0x8c0 #define S_CH1_OUTPUT 27 @@ -19986,6 +28763,10 @@ #define A_TP_MIB_ENG_LINE_1 0x6d #define A_TP_MIB_ENG_LINE_2 0x6e #define A_TP_MIB_ENG_LINE_3 0x6f +#define A_TP_MIB_TNL_ERR_0 0x70 +#define A_TP_MIB_TNL_ERR_1 0x71 +#define A_TP_MIB_TNL_ERR_2 0x72 +#define A_TP_MIB_TNL_ERR_3 0x73 /* registers for module ULP_TX */ #define ULP_TX_BASE_ADDR 0x8dc0 @@ -20028,6 +28809,46 @@ #define V_LOSDR(x) ((x) << S_LOSDR) #define F_LOSDR V_LOSDR(1U) +#define S_ULIMIT_EXCLUSIVE_FIX 16 +#define V_ULIMIT_EXCLUSIVE_FIX(x) ((x) << S_ULIMIT_EXCLUSIVE_FIX) +#define F_ULIMIT_EXCLUSIVE_FIX V_ULIMIT_EXCLUSIVE_FIX(1U) + +#define S_ISO_A_FLAG_EN 15 +#define V_ISO_A_FLAG_EN(x) ((x) << S_ISO_A_FLAG_EN) +#define F_ISO_A_FLAG_EN V_ISO_A_FLAG_EN(1U) + +#define S_IWARP_SEQ_FLIT_DIS 14 +#define V_IWARP_SEQ_FLIT_DIS(x) ((x) << S_IWARP_SEQ_FLIT_DIS) +#define F_IWARP_SEQ_FLIT_DIS V_IWARP_SEQ_FLIT_DIS(1U) + +#define S_MR_SIZE_FIX_EN 13 +#define V_MR_SIZE_FIX_EN(x) ((x) << S_MR_SIZE_FIX_EN) +#define F_MR_SIZE_FIX_EN V_MR_SIZE_FIX_EN(1U) + +#define 
S_T10_ISO_FIX_EN 12 +#define V_T10_ISO_FIX_EN(x) ((x) << S_T10_ISO_FIX_EN) +#define F_T10_ISO_FIX_EN V_T10_ISO_FIX_EN(1U) + +#define S_CPL_FLAGS_UPDATE_EN 11 +#define V_CPL_FLAGS_UPDATE_EN(x) ((x) << S_CPL_FLAGS_UPDATE_EN) +#define F_CPL_FLAGS_UPDATE_EN V_CPL_FLAGS_UPDATE_EN(1U) + +#define S_IWARP_SEQ_UPDATE_EN 10 +#define V_IWARP_SEQ_UPDATE_EN(x) ((x) << S_IWARP_SEQ_UPDATE_EN) +#define F_IWARP_SEQ_UPDATE_EN V_IWARP_SEQ_UPDATE_EN(1U) + +#define S_SEQ_UPDATE_EN 9 +#define V_SEQ_UPDATE_EN(x) ((x) << S_SEQ_UPDATE_EN) +#define F_SEQ_UPDATE_EN V_SEQ_UPDATE_EN(1U) + +#define S_ERR_ITT_EN 8 +#define V_ERR_ITT_EN(x) ((x) << S_ERR_ITT_EN) +#define F_ERR_ITT_EN V_ERR_ITT_EN(1U) + +#define S_ATOMIC_FIX_DIS 7 +#define V_ATOMIC_FIX_DIS(x) ((x) << S_ATOMIC_FIX_DIS) +#define F_ATOMIC_FIX_DIS V_ATOMIC_FIX_DIS(1U) + #define A_ULP_TX_PERR_INJECT 0x8dc4 #define A_ULP_TX_INT_ENABLE 0x8dc8 @@ -20166,6 +28987,28 @@ #define A_ULP_TX_PBL_LLIMIT 0x8ddc #define A_ULP_TX_PBL_ULIMIT 0x8de0 #define A_ULP_TX_CPL_ERR_OFFSET 0x8de4 +#define A_ULP_TX_TLS_CTL 0x8de4 + +#define S_TLSPERREN 4 +#define V_TLSPERREN(x) ((x) << S_TLSPERREN) +#define F_TLSPERREN V_TLSPERREN(1U) + +#define S_TLSPATHCTL 3 +#define V_TLSPATHCTL(x) ((x) << S_TLSPATHCTL) +#define F_TLSPATHCTL V_TLSPATHCTL(1U) + +#define S_TLSDISABLEIFUSE 2 +#define V_TLSDISABLEIFUSE(x) ((x) << S_TLSDISABLEIFUSE) +#define F_TLSDISABLEIFUSE V_TLSDISABLEIFUSE(1U) + +#define S_TLSDISABLECFUSE 1 +#define V_TLSDISABLECFUSE(x) ((x) << S_TLSDISABLECFUSE) +#define F_TLSDISABLECFUSE V_TLSDISABLECFUSE(1U) + +#define S_TLSDISABLE 0 +#define V_TLSDISABLE(x) ((x) << S_TLSDISABLE) +#define F_TLSDISABLE V_TLSDISABLE(1U) + #define A_ULP_TX_CPL_ERR_MASK_L 0x8de8 #define A_ULP_TX_CPL_ERR_MASK_H 0x8dec #define A_ULP_TX_CPL_ERR_VALUE_L 0x8df0 @@ -20321,6 +29164,17 @@ #define A_ULP_TX_ULP2TP_BIST_ERROR_CNT 0x8e34 #define A_ULP_TX_PERR_INJECT_2 0x8e34 + +#define S_T5_MEMSEL 1 +#define M_T5_MEMSEL 0x7U +#define V_T5_MEMSEL(x) ((x) << S_T5_MEMSEL) +#define 
G_T5_MEMSEL(x) (((x) >> S_T5_MEMSEL) & M_T5_MEMSEL) + +#define S_MEMSEL_ULPTX 1 +#define M_MEMSEL_ULPTX 0x1fU +#define V_MEMSEL_ULPTX(x) ((x) << S_MEMSEL_ULPTX) +#define G_MEMSEL_ULPTX(x) (((x) >> S_MEMSEL_ULPTX) & M_MEMSEL_ULPTX) + #define A_ULP_TX_FPGA_CMD_CTRL 0x8e38 #define A_ULP_TX_T5_FPGA_CMD_CTRL 0x8e38 @@ -20429,6 +29283,102 @@ #define V_T10_PI_SRAM_PERR_SET0(x) ((x) << S_T10_PI_SRAM_PERR_SET0) #define F_T10_PI_SRAM_PERR_SET0 V_T10_PI_SRAM_PERR_SET0(1U) +#define S_EDMA_IN_FIFO_PERR_SET3 31 +#define V_EDMA_IN_FIFO_PERR_SET3(x) ((x) << S_EDMA_IN_FIFO_PERR_SET3) +#define F_EDMA_IN_FIFO_PERR_SET3 V_EDMA_IN_FIFO_PERR_SET3(1U) + +#define S_EDMA_IN_FIFO_PERR_SET2 30 +#define V_EDMA_IN_FIFO_PERR_SET2(x) ((x) << S_EDMA_IN_FIFO_PERR_SET2) +#define F_EDMA_IN_FIFO_PERR_SET2 V_EDMA_IN_FIFO_PERR_SET2(1U) + +#define S_EDMA_IN_FIFO_PERR_SET1 29 +#define V_EDMA_IN_FIFO_PERR_SET1(x) ((x) << S_EDMA_IN_FIFO_PERR_SET1) +#define F_EDMA_IN_FIFO_PERR_SET1 V_EDMA_IN_FIFO_PERR_SET1(1U) + +#define S_EDMA_IN_FIFO_PERR_SET0 28 +#define V_EDMA_IN_FIFO_PERR_SET0(x) ((x) << S_EDMA_IN_FIFO_PERR_SET0) +#define F_EDMA_IN_FIFO_PERR_SET0 V_EDMA_IN_FIFO_PERR_SET0(1U) + +#define S_ALIGN_CTL_FIFO_PERR_SET3 27 +#define V_ALIGN_CTL_FIFO_PERR_SET3(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET3) +#define F_ALIGN_CTL_FIFO_PERR_SET3 V_ALIGN_CTL_FIFO_PERR_SET3(1U) + +#define S_ALIGN_CTL_FIFO_PERR_SET2 26 +#define V_ALIGN_CTL_FIFO_PERR_SET2(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET2) +#define F_ALIGN_CTL_FIFO_PERR_SET2 V_ALIGN_CTL_FIFO_PERR_SET2(1U) + +#define S_ALIGN_CTL_FIFO_PERR_SET1 25 +#define V_ALIGN_CTL_FIFO_PERR_SET1(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET1) +#define F_ALIGN_CTL_FIFO_PERR_SET1 V_ALIGN_CTL_FIFO_PERR_SET1(1U) + +#define S_ALIGN_CTL_FIFO_PERR_SET0 24 +#define V_ALIGN_CTL_FIFO_PERR_SET0(x) ((x) << S_ALIGN_CTL_FIFO_PERR_SET0) +#define F_ALIGN_CTL_FIFO_PERR_SET0 V_ALIGN_CTL_FIFO_PERR_SET0(1U) + +#define S_SGE_FIFO_PERR_SET3 23 +#define V_SGE_FIFO_PERR_SET3(x) ((x) << S_SGE_FIFO_PERR_SET3) +#define 
F_SGE_FIFO_PERR_SET3 V_SGE_FIFO_PERR_SET3(1U) + +#define S_SGE_FIFO_PERR_SET2 22 +#define V_SGE_FIFO_PERR_SET2(x) ((x) << S_SGE_FIFO_PERR_SET2) +#define F_SGE_FIFO_PERR_SET2 V_SGE_FIFO_PERR_SET2(1U) + +#define S_SGE_FIFO_PERR_SET1 21 +#define V_SGE_FIFO_PERR_SET1(x) ((x) << S_SGE_FIFO_PERR_SET1) +#define F_SGE_FIFO_PERR_SET1 V_SGE_FIFO_PERR_SET1(1U) + +#define S_SGE_FIFO_PERR_SET0 20 +#define V_SGE_FIFO_PERR_SET0(x) ((x) << S_SGE_FIFO_PERR_SET0) +#define F_SGE_FIFO_PERR_SET0 V_SGE_FIFO_PERR_SET0(1U) + +#define S_STAG_FIFO_PERR_SET3 19 +#define V_STAG_FIFO_PERR_SET3(x) ((x) << S_STAG_FIFO_PERR_SET3) +#define F_STAG_FIFO_PERR_SET3 V_STAG_FIFO_PERR_SET3(1U) + +#define S_STAG_FIFO_PERR_SET2 18 +#define V_STAG_FIFO_PERR_SET2(x) ((x) << S_STAG_FIFO_PERR_SET2) +#define F_STAG_FIFO_PERR_SET2 V_STAG_FIFO_PERR_SET2(1U) + +#define S_STAG_FIFO_PERR_SET1 17 +#define V_STAG_FIFO_PERR_SET1(x) ((x) << S_STAG_FIFO_PERR_SET1) +#define F_STAG_FIFO_PERR_SET1 V_STAG_FIFO_PERR_SET1(1U) + +#define S_STAG_FIFO_PERR_SET0 16 +#define V_STAG_FIFO_PERR_SET0(x) ((x) << S_STAG_FIFO_PERR_SET0) +#define F_STAG_FIFO_PERR_SET0 V_STAG_FIFO_PERR_SET0(1U) + +#define S_MAP_FIFO_PERR_SET3 15 +#define V_MAP_FIFO_PERR_SET3(x) ((x) << S_MAP_FIFO_PERR_SET3) +#define F_MAP_FIFO_PERR_SET3 V_MAP_FIFO_PERR_SET3(1U) + +#define S_MAP_FIFO_PERR_SET2 14 +#define V_MAP_FIFO_PERR_SET2(x) ((x) << S_MAP_FIFO_PERR_SET2) +#define F_MAP_FIFO_PERR_SET2 V_MAP_FIFO_PERR_SET2(1U) + +#define S_MAP_FIFO_PERR_SET1 13 +#define V_MAP_FIFO_PERR_SET1(x) ((x) << S_MAP_FIFO_PERR_SET1) +#define F_MAP_FIFO_PERR_SET1 V_MAP_FIFO_PERR_SET1(1U) + +#define S_MAP_FIFO_PERR_SET0 12 +#define V_MAP_FIFO_PERR_SET0(x) ((x) << S_MAP_FIFO_PERR_SET0) +#define F_MAP_FIFO_PERR_SET0 V_MAP_FIFO_PERR_SET0(1U) + +#define S_DMA_FIFO_PERR_SET3 11 +#define V_DMA_FIFO_PERR_SET3(x) ((x) << S_DMA_FIFO_PERR_SET3) +#define F_DMA_FIFO_PERR_SET3 V_DMA_FIFO_PERR_SET3(1U) + +#define S_DMA_FIFO_PERR_SET2 10 +#define V_DMA_FIFO_PERR_SET2(x) ((x) << S_DMA_FIFO_PERR_SET2) 
+#define F_DMA_FIFO_PERR_SET2 V_DMA_FIFO_PERR_SET2(1U) + +#define S_DMA_FIFO_PERR_SET1 9 +#define V_DMA_FIFO_PERR_SET1(x) ((x) << S_DMA_FIFO_PERR_SET1) +#define F_DMA_FIFO_PERR_SET1 V_DMA_FIFO_PERR_SET1(1U) + +#define S_DMA_FIFO_PERR_SET0 8 +#define V_DMA_FIFO_PERR_SET0(x) ((x) << S_DMA_FIFO_PERR_SET0) +#define F_DMA_FIFO_PERR_SET0 V_DMA_FIFO_PERR_SET0(1U) + #define A_ULP_TX_INT_CAUSE_2 0x8e80 #define A_ULP_TX_PERR_ENABLE_2 0x8e84 #define A_ULP_TX_SE_CNT_ERR 0x8ea0 @@ -20554,6 +29504,27 @@ #define G_DROP_CH0(x) (((x) >> S_DROP_CH0) & M_DROP_CH0) #define A_ULP_TX_T5_DROP_CNT 0x8eb8 + +#define S_DROP_INVLD_MC_CH3 28 +#define M_DROP_INVLD_MC_CH3 0xfU +#define V_DROP_INVLD_MC_CH3(x) ((x) << S_DROP_INVLD_MC_CH3) +#define G_DROP_INVLD_MC_CH3(x) (((x) >> S_DROP_INVLD_MC_CH3) & M_DROP_INVLD_MC_CH3) + +#define S_DROP_INVLD_MC_CH2 24 +#define M_DROP_INVLD_MC_CH2 0xfU +#define V_DROP_INVLD_MC_CH2(x) ((x) << S_DROP_INVLD_MC_CH2) +#define G_DROP_INVLD_MC_CH2(x) (((x) >> S_DROP_INVLD_MC_CH2) & M_DROP_INVLD_MC_CH2) + +#define S_DROP_INVLD_MC_CH1 20 +#define M_DROP_INVLD_MC_CH1 0xfU +#define V_DROP_INVLD_MC_CH1(x) ((x) << S_DROP_INVLD_MC_CH1) +#define G_DROP_INVLD_MC_CH1(x) (((x) >> S_DROP_INVLD_MC_CH1) & M_DROP_INVLD_MC_CH1) + +#define S_DROP_INVLD_MC_CH0 16 +#define M_DROP_INVLD_MC_CH0 0xfU +#define V_DROP_INVLD_MC_CH0(x) ((x) << S_DROP_INVLD_MC_CH0) +#define G_DROP_INVLD_MC_CH0(x) (((x) >> S_DROP_INVLD_MC_CH0) & M_DROP_INVLD_MC_CH0) + #define A_ULP_TX_CSU_REVISION 0x8ebc #define A_ULP_TX_LA_RDPTR_0 0x8ec0 #define A_ULP_TX_LA_RDDATA_0 0x8ec4 @@ -20610,6 +29581,100 @@ #define A_ULP_TX_ASIC_DEBUG_2 0x8f7c #define A_ULP_TX_ASIC_DEBUG_3 0x8f80 #define A_ULP_TX_ASIC_DEBUG_4 0x8f84 +#define A_ULP_TX_CPL_TX_DATA_FLAGS_MASK 0x8f88 + +#define S_BYPASS_FIRST 26 +#define V_BYPASS_FIRST(x) ((x) << S_BYPASS_FIRST) +#define F_BYPASS_FIRST V_BYPASS_FIRST(1U) + +#define S_BYPASS_MIDDLE 25 +#define V_BYPASS_MIDDLE(x) ((x) << S_BYPASS_MIDDLE) +#define F_BYPASS_MIDDLE V_BYPASS_MIDDLE(1U) + 
+#define S_BYPASS_LAST 24 +#define V_BYPASS_LAST(x) ((x) << S_BYPASS_LAST) +#define F_BYPASS_LAST V_BYPASS_LAST(1U) + +#define S_PUSH_FIRST 22 +#define V_PUSH_FIRST(x) ((x) << S_PUSH_FIRST) +#define F_PUSH_FIRST V_PUSH_FIRST(1U) + +#define S_PUSH_MIDDLE 21 +#define V_PUSH_MIDDLE(x) ((x) << S_PUSH_MIDDLE) +#define F_PUSH_MIDDLE V_PUSH_MIDDLE(1U) + +#define S_PUSH_LAST 20 +#define V_PUSH_LAST(x) ((x) << S_PUSH_LAST) +#define F_PUSH_LAST V_PUSH_LAST(1U) + +#define S_SAVE_FIRST 18 +#define V_SAVE_FIRST(x) ((x) << S_SAVE_FIRST) +#define F_SAVE_FIRST V_SAVE_FIRST(1U) + +#define S_SAVE_MIDDLE 17 +#define V_SAVE_MIDDLE(x) ((x) << S_SAVE_MIDDLE) +#define F_SAVE_MIDDLE V_SAVE_MIDDLE(1U) + +#define S_SAVE_LAST 16 +#define V_SAVE_LAST(x) ((x) << S_SAVE_LAST) +#define F_SAVE_LAST V_SAVE_LAST(1U) + +#define S_FLUSH_FIRST 14 +#define V_FLUSH_FIRST(x) ((x) << S_FLUSH_FIRST) +#define F_FLUSH_FIRST V_FLUSH_FIRST(1U) + +#define S_FLUSH_MIDDLE 13 +#define V_FLUSH_MIDDLE(x) ((x) << S_FLUSH_MIDDLE) +#define F_FLUSH_MIDDLE V_FLUSH_MIDDLE(1U) + +#define S_FLUSH_LAST 12 +#define V_FLUSH_LAST(x) ((x) << S_FLUSH_LAST) +#define F_FLUSH_LAST V_FLUSH_LAST(1U) + +#define S_URGENT_FIRST 10 +#define V_URGENT_FIRST(x) ((x) << S_URGENT_FIRST) +#define F_URGENT_FIRST V_URGENT_FIRST(1U) + +#define S_URGENT_MIDDLE 9 +#define V_URGENT_MIDDLE(x) ((x) << S_URGENT_MIDDLE) +#define F_URGENT_MIDDLE V_URGENT_MIDDLE(1U) + +#define S_URGENT_LAST 8 +#define V_URGENT_LAST(x) ((x) << S_URGENT_LAST) +#define F_URGENT_LAST V_URGENT_LAST(1U) + +#define S_MORE_FIRST 6 +#define V_MORE_FIRST(x) ((x) << S_MORE_FIRST) +#define F_MORE_FIRST V_MORE_FIRST(1U) + +#define S_MORE_MIDDLE 5 +#define V_MORE_MIDDLE(x) ((x) << S_MORE_MIDDLE) +#define F_MORE_MIDDLE V_MORE_MIDDLE(1U) + +#define S_MORE_LAST 4 +#define V_MORE_LAST(x) ((x) << S_MORE_LAST) +#define F_MORE_LAST V_MORE_LAST(1U) + +#define S_SHOVE_FIRST 2 +#define V_SHOVE_FIRST(x) ((x) << S_SHOVE_FIRST) +#define F_SHOVE_FIRST V_SHOVE_FIRST(1U) + +#define S_SHOVE_MIDDLE 1 
+#define V_SHOVE_MIDDLE(x) ((x) << S_SHOVE_MIDDLE) +#define F_SHOVE_MIDDLE V_SHOVE_MIDDLE(1U) + +#define S_SHOVE_LAST 0 +#define V_SHOVE_LAST(x) ((x) << S_SHOVE_LAST) +#define F_SHOVE_LAST V_SHOVE_LAST(1U) + +#define A_ULP_TX_TLS_IND_CMD 0x8fb8 + +#define S_TLS_TX_REG_OFF_ADDR 0 +#define M_TLS_TX_REG_OFF_ADDR 0x3ffU +#define V_TLS_TX_REG_OFF_ADDR(x) ((x) << S_TLS_TX_REG_OFF_ADDR) +#define G_TLS_TX_REG_OFF_ADDR(x) (((x) >> S_TLS_TX_REG_OFF_ADDR) & M_TLS_TX_REG_OFF_ADDR) + +#define A_ULP_TX_TLS_IND_DATA 0x8fbc /* registers for module PM_RX */ #define PM_RX_BASE_ADDR 0x8fc0 @@ -21106,6 +30171,16 @@ #define V_DUPLICATE(x) ((x) << S_DUPLICATE) #define G_DUPLICATE(x) (((x) >> S_DUPLICATE) & M_DUPLICATE) +#define S_RX_PCMD_SRDY_STAT4 8 +#define M_RX_PCMD_SRDY_STAT4 0x3U +#define V_RX_PCMD_SRDY_STAT4(x) ((x) << S_RX_PCMD_SRDY_STAT4) +#define G_RX_PCMD_SRDY_STAT4(x) (((x) >> S_RX_PCMD_SRDY_STAT4) & M_RX_PCMD_SRDY_STAT4) + +#define S_RX_PCMD_DRDY_STAT4 6 +#define M_RX_PCMD_DRDY_STAT4 0x3U +#define V_RX_PCMD_DRDY_STAT4(x) ((x) << S_RX_PCMD_DRDY_STAT4) +#define G_RX_PCMD_DRDY_STAT4(x) (((x) >> S_RX_PCMD_DRDY_STAT4) & M_RX_PCMD_DRDY_STAT4) + #define A_PM_RX_DBG_STAT5 0x10026 #define S_RX_ATLST_1_PCMD_CH1 29 @@ -21116,6 +30191,16 @@ #define V_RX_ATLST_1_PCMD_CH0(x) ((x) << S_RX_ATLST_1_PCMD_CH0) #define F_RX_ATLST_1_PCMD_CH0 V_RX_ATLST_1_PCMD_CH0(1U) +#define S_T5_RX_PCMD_DRDY 26 +#define M_T5_RX_PCMD_DRDY 0x3U +#define V_T5_RX_PCMD_DRDY(x) ((x) << S_T5_RX_PCMD_DRDY) +#define G_T5_RX_PCMD_DRDY(x) (((x) >> S_T5_RX_PCMD_DRDY) & M_T5_RX_PCMD_DRDY) + +#define S_T5_RX_PCMD_SRDY 24 +#define M_T5_RX_PCMD_SRDY 0x3U +#define V_T5_RX_PCMD_SRDY(x) ((x) << S_T5_RX_PCMD_SRDY) +#define G_T5_RX_PCMD_SRDY(x) (((x) >> S_T5_RX_PCMD_SRDY) & M_T5_RX_PCMD_SRDY) + #define S_RX_ISPI_TXVALID 20 #define M_RX_ISPI_TXVALID 0xfU #define V_RX_ISPI_TXVALID(x) ((x) << S_RX_ISPI_TXVALID) @@ -21156,6 +30241,16 @@ #define V_RX_C_TXAFULL(x) ((x) << S_RX_C_TXAFULL) #define G_RX_C_TXAFULL(x) (((x) >> 
S_RX_C_TXAFULL) & M_RX_C_TXAFULL) +#define S_T6_RX_PCMD_DRDY 26 +#define M_T6_RX_PCMD_DRDY 0x3U +#define V_T6_RX_PCMD_DRDY(x) ((x) << S_T6_RX_PCMD_DRDY) +#define G_T6_RX_PCMD_DRDY(x) (((x) >> S_T6_RX_PCMD_DRDY) & M_T6_RX_PCMD_DRDY) + +#define S_T6_RX_PCMD_SRDY 24 +#define M_T6_RX_PCMD_SRDY 0x3U +#define V_T6_RX_PCMD_SRDY(x) ((x) << S_T6_RX_PCMD_SRDY) +#define G_T6_RX_PCMD_SRDY(x) (((x) >> S_T6_RX_PCMD_SRDY) & M_T6_RX_PCMD_SRDY) + #define A_PM_RX_DBG_STAT6 0x10027 #define S_RX_M_INTRNL_FIFO_CNT 4 @@ -21179,6 +30274,23 @@ #define V_RX_M_REQDATARDY(x) ((x) << S_RX_M_REQDATARDY) #define F_RX_M_REQDATARDY V_RX_M_REQDATARDY(1U) +#define S_T6_RX_M_INTRNL_FIFO_CNT 7 +#define M_T6_RX_M_INTRNL_FIFO_CNT 0x3U +#define V_T6_RX_M_INTRNL_FIFO_CNT(x) ((x) << S_T6_RX_M_INTRNL_FIFO_CNT) +#define G_T6_RX_M_INTRNL_FIFO_CNT(x) (((x) >> S_T6_RX_M_INTRNL_FIFO_CNT) & M_T6_RX_M_INTRNL_FIFO_CNT) + +#define S_RX_M_RSPVLD 6 +#define V_RX_M_RSPVLD(x) ((x) << S_RX_M_RSPVLD) +#define F_RX_M_RSPVLD V_RX_M_RSPVLD(1U) + +#define S_RX_M_RSPRDY 5 +#define V_RX_M_RSPRDY(x) ((x) << S_RX_M_RSPRDY) +#define F_RX_M_RSPRDY V_RX_M_RSPRDY(1U) + +#define S_RX_M_REQADDRVLD 4 +#define V_RX_M_REQADDRVLD(x) ((x) << S_RX_M_REQADDRVLD) +#define F_RX_M_REQADDRVLD V_RX_M_REQADDRVLD(1U) + #define A_PM_RX_DBG_STAT7 0x10028 #define S_RX_PCMD1_FREE_CNT 7 @@ -21560,6 +30672,14 @@ #define V_OSPI_OVERFLOW2(x) ((x) << S_OSPI_OVERFLOW2) #define F_OSPI_OVERFLOW2 V_OSPI_OVERFLOW2(1U) +#define S_T5_OSPI_OVERFLOW1 5 +#define V_T5_OSPI_OVERFLOW1(x) ((x) << S_T5_OSPI_OVERFLOW1) +#define F_T5_OSPI_OVERFLOW1 V_T5_OSPI_OVERFLOW1(1U) + +#define S_T5_OSPI_OVERFLOW0 4 +#define V_T5_OSPI_OVERFLOW0(x) ((x) << S_T5_OSPI_OVERFLOW0) +#define F_T5_OSPI_OVERFLOW0 V_T5_OSPI_OVERFLOW0(1U) + #define S_M_INTFPERREN 3 #define V_M_INTFPERREN(x) ((x) << S_M_INTFPERREN) #define F_M_INTFPERREN V_M_INTFPERREN(1U) @@ -21576,6 +30696,22 @@ #define V_SDC_ERR_EN(x) ((x) << S_SDC_ERR_EN) #define F_SDC_ERR_EN V_SDC_ERR_EN(1U) +#define S_OSPI_OVERFLOW3_T5 7 
+#define V_OSPI_OVERFLOW3_T5(x) ((x) << S_OSPI_OVERFLOW3_T5) +#define F_OSPI_OVERFLOW3_T5 V_OSPI_OVERFLOW3_T5(1U) + +#define S_OSPI_OVERFLOW2_T5 6 +#define V_OSPI_OVERFLOW2_T5(x) ((x) << S_OSPI_OVERFLOW2_T5) +#define F_OSPI_OVERFLOW2_T5 V_OSPI_OVERFLOW2_T5(1U) + +#define S_OSPI_OVERFLOW1_T5 5 +#define V_OSPI_OVERFLOW1_T5(x) ((x) << S_OSPI_OVERFLOW1_T5) +#define F_OSPI_OVERFLOW1_T5 V_OSPI_OVERFLOW1_T5(1U) + +#define S_OSPI_OVERFLOW0_T5 4 +#define V_OSPI_OVERFLOW0_T5(x) ((x) << S_OSPI_OVERFLOW0_T5) +#define F_OSPI_OVERFLOW0_T5 V_OSPI_OVERFLOW0_T5(1U) + #define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD0 0x10023 #define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD1 0x10024 #define A_PM_TX_PRFTCH_WRR_WAIT_CNT_THRSHLD2 0x10025 @@ -21599,34 +30735,38 @@ #define A_PM_TX_INT_CAUSE_MASK_HALT 0x1002b #define A_PM_TX_DBG_STAT0 0x1002c -#define S_RD_I_BUSY 28 +#define S_RD_I_BUSY 29 #define V_RD_I_BUSY(x) ((x) << S_RD_I_BUSY) #define F_RD_I_BUSY V_RD_I_BUSY(1U) -#define S_WR_O_ONLY 27 -#define V_WR_O_ONLY(x) ((x) << S_WR_O_ONLY) -#define F_WR_O_ONLY V_WR_O_ONLY(1U) +#define S_WR_O_BUSY 28 +#define V_WR_O_BUSY(x) ((x) << S_WR_O_BUSY) +#define F_WR_O_BUSY V_WR_O_BUSY(1U) -#define S_M_TO_BUSY 26 -#define V_M_TO_BUSY(x) ((x) << S_M_TO_BUSY) -#define F_M_TO_BUSY V_M_TO_BUSY(1U) +#define S_M_TO_O_BUSY 27 +#define V_M_TO_O_BUSY(x) ((x) << S_M_TO_O_BUSY) +#define F_M_TO_O_BUSY V_M_TO_O_BUSY(1U) -#define S_I_TO_M_BUSY 25 +#define S_I_TO_M_BUSY 26 #define V_I_TO_M_BUSY(x) ((x) << S_I_TO_M_BUSY) #define F_I_TO_M_BUSY V_I_TO_M_BUSY(1U) -#define S_PCMD_FB_ONLY 24 +#define S_PCMD_FB_ONLY 25 #define V_PCMD_FB_ONLY(x) ((x) << S_PCMD_FB_ONLY) #define F_PCMD_FB_ONLY V_PCMD_FB_ONLY(1U) -#define S_PCMD_MEM 23 +#define S_PCMD_MEM 24 #define V_PCMD_MEM(x) ((x) << S_PCMD_MEM) #define F_PCMD_MEM V_PCMD_MEM(1U) -#define S_PCMD_BYPASS 22 +#define S_PCMD_BYPASS 23 #define V_PCMD_BYPASS(x) ((x) << S_PCMD_BYPASS) #define F_PCMD_BYPASS V_PCMD_BYPASS(1U) +#define S_PCMD_EOP2 22 +#define V_PCMD_EOP2(x) ((x) << S_PCMD_EOP2) 
+#define F_PCMD_EOP2 V_PCMD_EOP2(1U) + #define S_PCMD_EOP 21 #define V_PCMD_EOP(x) ((x) << S_PCMD_EOP) #define F_PCMD_EOP V_PCMD_EOP(1U) @@ -21645,6 +30785,34 @@ #define V_CUR_PCMD_LEN(x) ((x) << S_CUR_PCMD_LEN) #define G_CUR_PCMD_LEN(x) (((x) >> S_CUR_PCMD_LEN) & M_CUR_PCMD_LEN) +#define S_T6_RD_I_BUSY 28 +#define V_T6_RD_I_BUSY(x) ((x) << S_T6_RD_I_BUSY) +#define F_T6_RD_I_BUSY V_T6_RD_I_BUSY(1U) + +#define S_T6_WR_O_BUSY 27 +#define V_T6_WR_O_BUSY(x) ((x) << S_T6_WR_O_BUSY) +#define F_T6_WR_O_BUSY V_T6_WR_O_BUSY(1U) + +#define S_T6_M_TO_O_BUSY 26 +#define V_T6_M_TO_O_BUSY(x) ((x) << S_T6_M_TO_O_BUSY) +#define F_T6_M_TO_O_BUSY V_T6_M_TO_O_BUSY(1U) + +#define S_T6_I_TO_M_BUSY 25 +#define V_T6_I_TO_M_BUSY(x) ((x) << S_T6_I_TO_M_BUSY) +#define F_T6_I_TO_M_BUSY V_T6_I_TO_M_BUSY(1U) + +#define S_T6_PCMD_FB_ONLY 24 +#define V_T6_PCMD_FB_ONLY(x) ((x) << S_T6_PCMD_FB_ONLY) +#define F_T6_PCMD_FB_ONLY V_T6_PCMD_FB_ONLY(1U) + +#define S_T6_PCMD_MEM 23 +#define V_T6_PCMD_MEM(x) ((x) << S_T6_PCMD_MEM) +#define F_T6_PCMD_MEM V_T6_PCMD_MEM(1U) + +#define S_T6_PCMD_BYPASS 22 +#define V_T6_PCMD_BYPASS(x) ((x) << S_T6_PCMD_BYPASS) +#define F_T6_PCMD_BYPASS V_T6_PCMD_BYPASS(1U) + #define A_PM_TX_DBG_STAT1 0x1002d #define S_PCMD_MEM0 31 @@ -21979,6 +31147,33 @@ #define V_M_REQDATARDY(x) ((x) << S_M_REQDATARDY) #define F_M_REQDATARDY V_M_REQDATARDY(1U) +#define S_T6_MC_RSP_FIFO_CNT 27 +#define M_T6_MC_RSP_FIFO_CNT 0x3U +#define V_T6_MC_RSP_FIFO_CNT(x) ((x) << S_T6_MC_RSP_FIFO_CNT) +#define G_T6_MC_RSP_FIFO_CNT(x) (((x) >> S_T6_MC_RSP_FIFO_CNT) & M_T6_MC_RSP_FIFO_CNT) + +#define S_T6_PCMD_FREE_CNT0 17 +#define M_T6_PCMD_FREE_CNT0 0x3ffU +#define V_T6_PCMD_FREE_CNT0(x) ((x) << S_T6_PCMD_FREE_CNT0) +#define G_T6_PCMD_FREE_CNT0(x) (((x) >> S_T6_PCMD_FREE_CNT0) & M_T6_PCMD_FREE_CNT0) + +#define S_T6_PCMD_FREE_CNT1 7 +#define M_T6_PCMD_FREE_CNT1 0x3ffU +#define V_T6_PCMD_FREE_CNT1(x) ((x) << S_T6_PCMD_FREE_CNT1) +#define G_T6_PCMD_FREE_CNT1(x) (((x) >> S_T6_PCMD_FREE_CNT1) & 
M_T6_PCMD_FREE_CNT1) + +#define S_M_RSPVLD 6 +#define V_M_RSPVLD(x) ((x) << S_M_RSPVLD) +#define F_M_RSPVLD V_M_RSPVLD(1U) + +#define S_M_RSPRDY 5 +#define V_M_RSPRDY(x) ((x) << S_M_RSPRDY) +#define F_M_RSPRDY V_M_RSPRDY(1U) + +#define S_M_REQADDRVLD 4 +#define V_M_REQADDRVLD(x) ((x) << S_M_REQADDRVLD) +#define F_M_REQADDRVLD V_M_REQADDRVLD(1U) + #define A_PM_TX_DBG_STAT9 0x10035 #define S_PCMD_FREE_CNT2 10 @@ -22121,6 +31316,16 @@ #define V_BUNDLE_LEN0(x) ((x) << S_BUNDLE_LEN0) #define G_BUNDLE_LEN0(x) (((x) >> S_BUNDLE_LEN0) & M_BUNDLE_LEN0) +#define S_T6_BUNDLE_LEN_SRDY 24 +#define M_T6_BUNDLE_LEN_SRDY 0x3U +#define V_T6_BUNDLE_LEN_SRDY(x) ((x) << S_T6_BUNDLE_LEN_SRDY) +#define G_T6_BUNDLE_LEN_SRDY(x) (((x) >> S_T6_BUNDLE_LEN_SRDY) & M_T6_BUNDLE_LEN_SRDY) + +#define S_T6_BUNDLE_LEN1 12 +#define M_T6_BUNDLE_LEN1 0xfffU +#define V_T6_BUNDLE_LEN1(x) ((x) << S_T6_BUNDLE_LEN1) +#define G_T6_BUNDLE_LEN1(x) (((x) >> S_T6_BUNDLE_LEN1) & M_T6_BUNDLE_LEN1) + #define A_PM_TX_DBG_STAT16 0x1003c #define S_BUNDLE_LEN3 16 @@ -22457,6 +31662,22 @@ #define V_PTP_FWD_UP(x) ((x) << S_PTP_FWD_UP) #define F_PTP_FWD_UP V_PTP_FWD_UP(1U) +#define S_HASH_PRIO_SEL_LPBK 25 +#define V_HASH_PRIO_SEL_LPBK(x) ((x) << S_HASH_PRIO_SEL_LPBK) +#define F_HASH_PRIO_SEL_LPBK V_HASH_PRIO_SEL_LPBK(1U) + +#define S_HASH_PRIO_SEL_MAC 24 +#define V_HASH_PRIO_SEL_MAC(x) ((x) << S_HASH_PRIO_SEL_MAC) +#define F_HASH_PRIO_SEL_MAC V_HASH_PRIO_SEL_MAC(1U) + +#define S_HASH_EN_LPBK 23 +#define V_HASH_EN_LPBK(x) ((x) << S_HASH_EN_LPBK) +#define F_HASH_EN_LPBK V_HASH_EN_LPBK(1U) + +#define S_HASH_EN_MAC 22 +#define V_HASH_EN_MAC(x) ((x) << S_HASH_EN_MAC) +#define F_HASH_EN_MAC V_HASH_EN_MAC(1U) + #define A_MPS_PORT_RX_MTU 0x104 #define A_MPS_PORT_RX_PF_MAP 0x108 #define A_MPS_PORT_RX_VF_MAP0 0x10c @@ -22526,9 +31747,179 @@ #define V_FIXED_VF(x) ((x) << S_FIXED_VF) #define G_FIXED_VF(x) (((x) >> S_FIXED_VF) & M_FIXED_VF) +#define S_T6_FIXED_PFVF_MAC 14 +#define V_T6_FIXED_PFVF_MAC(x) ((x) << S_T6_FIXED_PFVF_MAC) 
+#define F_T6_FIXED_PFVF_MAC V_T6_FIXED_PFVF_MAC(1U) + +#define S_T6_FIXED_PFVF_LPBK 13 +#define V_T6_FIXED_PFVF_LPBK(x) ((x) << S_T6_FIXED_PFVF_LPBK) +#define F_T6_FIXED_PFVF_LPBK V_T6_FIXED_PFVF_LPBK(1U) + +#define S_T6_FIXED_PFVF_LPBK_OV 12 +#define V_T6_FIXED_PFVF_LPBK_OV(x) ((x) << S_T6_FIXED_PFVF_LPBK_OV) +#define F_T6_FIXED_PFVF_LPBK_OV V_T6_FIXED_PFVF_LPBK_OV(1U) + +#define S_T6_FIXED_PF 9 +#define M_T6_FIXED_PF 0x7U +#define V_T6_FIXED_PF(x) ((x) << S_T6_FIXED_PF) +#define G_T6_FIXED_PF(x) (((x) >> S_T6_FIXED_PF) & M_T6_FIXED_PF) + +#define S_T6_FIXED_VF_VLD 8 +#define V_T6_FIXED_VF_VLD(x) ((x) << S_T6_FIXED_VF_VLD) +#define F_T6_FIXED_VF_VLD V_T6_FIXED_VF_VLD(1U) + +#define S_T6_FIXED_VF 0 +#define M_T6_FIXED_VF 0xffU +#define V_T6_FIXED_VF(x) ((x) << S_T6_FIXED_VF) +#define G_T6_FIXED_VF(x) (((x) >> S_T6_FIXED_VF) & M_T6_FIXED_VF) + #define A_MPS_PORT_RX_SPARE 0x13c #define A_MPS_PORT_RX_PTP_RSS_HASH 0x140 #define A_MPS_PORT_RX_PTP_RSS_CONTROL 0x144 +#define A_MPS_PORT_RX_TS_VLD 0x148 + +#define S_TS_VLD 0 +#define M_TS_VLD 0x3U +#define V_TS_VLD(x) ((x) << S_TS_VLD) +#define G_TS_VLD(x) (((x) >> S_TS_VLD) & M_TS_VLD) + +#define A_MPS_PORT_RX_TNL_LKP_INNER_SEL 0x14c + +#define S_LKP_SEL 0 +#define V_LKP_SEL(x) ((x) << S_LKP_SEL) +#define F_LKP_SEL V_LKP_SEL(1U) + +#define A_MPS_PORT_RX_VF_MAP4 0x150 +#define A_MPS_PORT_RX_VF_MAP5 0x154 +#define A_MPS_PORT_RX_VF_MAP6 0x158 +#define A_MPS_PORT_RX_VF_MAP7 0x15c +#define A_MPS_PORT_RX_PRS_DEBUG_FLAG_MAC 0x160 + +#define S_OUTER_IPV4_N_INNER_IPV4 31 +#define V_OUTER_IPV4_N_INNER_IPV4(x) ((x) << S_OUTER_IPV4_N_INNER_IPV4) +#define F_OUTER_IPV4_N_INNER_IPV4 V_OUTER_IPV4_N_INNER_IPV4(1U) + +#define S_OUTER_IPV4_N_INNER_IPV6 30 +#define V_OUTER_IPV4_N_INNER_IPV6(x) ((x) << S_OUTER_IPV4_N_INNER_IPV6) +#define F_OUTER_IPV4_N_INNER_IPV6 V_OUTER_IPV4_N_INNER_IPV6(1U) + +#define S_OUTER_IPV6_N_INNER_IPV4 29 +#define V_OUTER_IPV6_N_INNER_IPV4(x) ((x) << S_OUTER_IPV6_N_INNER_IPV4) +#define F_OUTER_IPV6_N_INNER_IPV4 
V_OUTER_IPV6_N_INNER_IPV4(1U) + +#define S_OUTER_IPV6_N_INNER_IPV6 28 +#define V_OUTER_IPV6_N_INNER_IPV6(x) ((x) << S_OUTER_IPV6_N_INNER_IPV6) +#define F_OUTER_IPV6_N_INNER_IPV6 V_OUTER_IPV6_N_INNER_IPV6(1U) + +#define S_OUTER_IPV4_N_VLAN_NVGRE 27 +#define V_OUTER_IPV4_N_VLAN_NVGRE(x) ((x) << S_OUTER_IPV4_N_VLAN_NVGRE) +#define F_OUTER_IPV4_N_VLAN_NVGRE V_OUTER_IPV4_N_VLAN_NVGRE(1U) + +#define S_OUTER_IPV6_N_VLAN_NVGRE 26 +#define V_OUTER_IPV6_N_VLAN_NVGRE(x) ((x) << S_OUTER_IPV6_N_VLAN_NVGRE) +#define F_OUTER_IPV6_N_VLAN_NVGRE V_OUTER_IPV6_N_VLAN_NVGRE(1U) + +#define S_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE 25 +#define V_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE) +#define F_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE V_OUTER_IPV4_N_DOUBLE_VLAN_NVGRE(1U) + +#define S_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE 24 +#define V_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE) +#define F_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE V_OUTER_IPV6_N_DOUBLE_VLAN_NVGRE(1U) + +#define S_OUTER_IPV4_N_VLAN_GRE 23 +#define V_OUTER_IPV4_N_VLAN_GRE(x) ((x) << S_OUTER_IPV4_N_VLAN_GRE) +#define F_OUTER_IPV4_N_VLAN_GRE V_OUTER_IPV4_N_VLAN_GRE(1U) + +#define S_OUTER_IPV6_N_VLAN_GRE 22 +#define V_OUTER_IPV6_N_VLAN_GRE(x) ((x) << S_OUTER_IPV6_N_VLAN_GRE) +#define F_OUTER_IPV6_N_VLAN_GRE V_OUTER_IPV6_N_VLAN_GRE(1U) + +#define S_OUTER_IPV4_N_DOUBLE_VLAN_GRE 21 +#define V_OUTER_IPV4_N_DOUBLE_VLAN_GRE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_GRE) +#define F_OUTER_IPV4_N_DOUBLE_VLAN_GRE V_OUTER_IPV4_N_DOUBLE_VLAN_GRE(1U) + +#define S_OUTER_IPV6_N_DOUBLE_VLAN_GRE 20 +#define V_OUTER_IPV6_N_DOUBLE_VLAN_GRE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_GRE) +#define F_OUTER_IPV6_N_DOUBLE_VLAN_GRE V_OUTER_IPV6_N_DOUBLE_VLAN_GRE(1U) + +#define S_OUTER_IPV4_N_VLAN_VXLAN 19 +#define V_OUTER_IPV4_N_VLAN_VXLAN(x) ((x) << S_OUTER_IPV4_N_VLAN_VXLAN) +#define F_OUTER_IPV4_N_VLAN_VXLAN V_OUTER_IPV4_N_VLAN_VXLAN(1U) + +#define S_OUTER_IPV6_N_VLAN_VXLAN 18 +#define V_OUTER_IPV6_N_VLAN_VXLAN(x) ((x) << 
S_OUTER_IPV6_N_VLAN_VXLAN) +#define F_OUTER_IPV6_N_VLAN_VXLAN V_OUTER_IPV6_N_VLAN_VXLAN(1U) + +#define S_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN 17 +#define V_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN) +#define F_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN V_OUTER_IPV4_N_DOUBLE_VLAN_VXLAN(1U) + +#define S_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN 16 +#define V_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN) +#define F_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN V_OUTER_IPV6_N_DOUBLE_VLAN_VXLAN(1U) + +#define S_OUTER_IPV4_N_VLAN_GENEVE 15 +#define V_OUTER_IPV4_N_VLAN_GENEVE(x) ((x) << S_OUTER_IPV4_N_VLAN_GENEVE) +#define F_OUTER_IPV4_N_VLAN_GENEVE V_OUTER_IPV4_N_VLAN_GENEVE(1U) + +#define S_OUTER_IPV6_N_VLAN_GENEVE 14 +#define V_OUTER_IPV6_N_VLAN_GENEVE(x) ((x) << S_OUTER_IPV6_N_VLAN_GENEVE) +#define F_OUTER_IPV6_N_VLAN_GENEVE V_OUTER_IPV6_N_VLAN_GENEVE(1U) + +#define S_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE 13 +#define V_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE(x) ((x) << S_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE) +#define F_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE V_OUTER_IPV4_N_DOUBLE_VLAN_GENEVE(1U) + +#define S_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE 12 +#define V_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE(x) ((x) << S_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE) +#define F_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE V_OUTER_IPV6_N_DOUBLE_VLAN_GENEVE(1U) + +#define S_ERR_TNL_HDR_LEN 11 +#define V_ERR_TNL_HDR_LEN(x) ((x) << S_ERR_TNL_HDR_LEN) +#define F_ERR_TNL_HDR_LEN V_ERR_TNL_HDR_LEN(1U) + +#define S_NON_RUNT_FRAME 10 +#define V_NON_RUNT_FRAME(x) ((x) << S_NON_RUNT_FRAME) +#define F_NON_RUNT_FRAME V_NON_RUNT_FRAME(1U) + +#define S_INNER_VLAN_VLD 9 +#define V_INNER_VLAN_VLD(x) ((x) << S_INNER_VLAN_VLD) +#define F_INNER_VLAN_VLD V_INNER_VLAN_VLD(1U) + +#define S_ERR_IP_PAYLOAD_LEN 8 +#define V_ERR_IP_PAYLOAD_LEN(x) ((x) << S_ERR_IP_PAYLOAD_LEN) +#define F_ERR_IP_PAYLOAD_LEN V_ERR_IP_PAYLOAD_LEN(1U) + +#define S_ERR_UDP_PAYLOAD_LEN 7 +#define V_ERR_UDP_PAYLOAD_LEN(x) ((x) << S_ERR_UDP_PAYLOAD_LEN) +#define 
F_ERR_UDP_PAYLOAD_LEN V_ERR_UDP_PAYLOAD_LEN(1U) + +#define A_MPS_PORT_RX_PRS_DEBUG_FLAG_LPBK 0x164 + +#define S_T6_INNER_VLAN_VLD 10 +#define V_T6_INNER_VLAN_VLD(x) ((x) << S_T6_INNER_VLAN_VLD) +#define F_T6_INNER_VLAN_VLD V_T6_INNER_VLAN_VLD(1U) + +#define S_T6_ERR_IP_PAYLOAD_LEN 9 +#define V_T6_ERR_IP_PAYLOAD_LEN(x) ((x) << S_T6_ERR_IP_PAYLOAD_LEN) +#define F_T6_ERR_IP_PAYLOAD_LEN V_T6_ERR_IP_PAYLOAD_LEN(1U) + +#define S_T6_ERR_UDP_PAYLOAD_LEN 8 +#define V_T6_ERR_UDP_PAYLOAD_LEN(x) ((x) << S_T6_ERR_UDP_PAYLOAD_LEN) +#define F_T6_ERR_UDP_PAYLOAD_LEN V_T6_ERR_UDP_PAYLOAD_LEN(1U) + +#define A_MPS_PORT_RX_REPL_VECT_SEL 0x168 + +#define S_DIS_REPL_VECT_SEL 4 +#define V_DIS_REPL_VECT_SEL(x) ((x) << S_DIS_REPL_VECT_SEL) +#define F_DIS_REPL_VECT_SEL V_DIS_REPL_VECT_SEL(1U) + +#define S_REPL_VECT_SEL 0 +#define M_REPL_VECT_SEL 0xfU +#define V_REPL_VECT_SEL(x) ((x) << S_REPL_VECT_SEL) +#define G_REPL_VECT_SEL(x) (((x) >> S_REPL_VECT_SEL) & M_REPL_VECT_SEL) + #define A_MPS_PORT_TX_MAC_RELOAD_CH0 0x190 #define S_CREDIT 0 @@ -22561,6 +31952,16 @@ #define V_MAXPKTCNT(x) ((x) << S_MAXPKTCNT) #define G_MAXPKTCNT(x) (((x) >> S_MAXPKTCNT) & M_MAXPKTCNT) +#define S_OUT_TH 22 +#define M_OUT_TH 0xffU +#define V_OUT_TH(x) ((x) << S_OUT_TH) +#define G_OUT_TH(x) (((x) >> S_OUT_TH) & M_OUT_TH) + +#define S_IN_TH 14 +#define M_IN_TH 0xffU +#define V_IN_TH(x) ((x) << S_IN_TH) +#define G_IN_TH(x) (((x) >> S_IN_TH) & M_IN_TH) + #define A_MPS_PORT_FPGA_PAUSE_CTL 0x1c8 #define S_FPGAPAUSEEN 0 @@ -22617,6 +32018,50 @@ #define V_VF(x) ((x) << S_VF) #define G_VF(x) (((x) >> S_VF) & M_VF) +#define S_DISENCAPOUTERRPLCT 23 +#define V_DISENCAPOUTERRPLCT(x) ((x) << S_DISENCAPOUTERRPLCT) +#define F_DISENCAPOUTERRPLCT V_DISENCAPOUTERRPLCT(1U) + +#define S_DISENCAP 22 +#define V_DISENCAP(x) ((x) << S_DISENCAP) +#define F_DISENCAP V_DISENCAP(1U) + +#define S_T6_VALID 21 +#define V_T6_VALID(x) ((x) << S_T6_VALID) +#define F_T6_VALID V_T6_VALID(1U) + +#define S_T6_HASHPORTMAP 17 +#define M_T6_HASHPORTMAP 
0xfU +#define V_T6_HASHPORTMAP(x) ((x) << S_T6_HASHPORTMAP) +#define G_T6_HASHPORTMAP(x) (((x) >> S_T6_HASHPORTMAP) & M_T6_HASHPORTMAP) + +#define S_T6_MULTILISTEN 16 +#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN) +#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U) + +#define S_T6_PRIORITY 13 +#define M_T6_PRIORITY 0x7U +#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY) +#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY) + +#define S_T6_REPLICATE 12 +#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE) +#define F_T6_REPLICATE V_T6_REPLICATE(1U) + +#define S_T6_PF 9 +#define M_T6_PF 0x7U +#define V_T6_PF(x) ((x) << S_T6_PF) +#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF) + +#define S_T6_VF_VALID 8 +#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID) +#define F_T6_VF_VALID V_T6_VF_VALID(1U) + +#define S_T6_VF 0 +#define M_T6_VF 0xffU +#define V_T6_VF(x) ((x) << S_T6_VF) +#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF) + #define A_MPS_PF_CTL 0x2c0 #define S_TXEN 1 @@ -22663,6 +32108,33 @@ #define V_PROMISCEN(x) ((x) << S_PROMISCEN) #define F_PROMISCEN V_PROMISCEN(1U) +#define S_T6_MULTILISTEN 16 +#define V_T6_MULTILISTEN(x) ((x) << S_T6_MULTILISTEN) +#define F_T6_MULTILISTEN V_T6_MULTILISTEN(1U) + +#define S_T6_PRIORITY 13 +#define M_T6_PRIORITY 0x7U +#define V_T6_PRIORITY(x) ((x) << S_T6_PRIORITY) +#define G_T6_PRIORITY(x) (((x) >> S_T6_PRIORITY) & M_T6_PRIORITY) + +#define S_T6_REPLICATE 12 +#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE) +#define F_T6_REPLICATE V_T6_REPLICATE(1U) + +#define S_T6_PF 9 +#define M_T6_PF 0x7U +#define V_T6_PF(x) ((x) << S_T6_PF) +#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF) + +#define S_T6_VF_VALID 8 +#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID) +#define F_T6_VF_VALID V_T6_VF_VALID(1U) + +#define S_T6_VF 0 +#define M_T6_VF 0xffU +#define V_T6_VF(x) ((x) << S_T6_VF) +#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF) + #define A_MPS_PF_STAT_TX_PF_BCAST_FRAMES_H 0x30c #define A_MPS_PORT_CLS_BMC_MAC_ADDR_L 0x30c #define 
A_MPS_PF_STAT_TX_PF_MCAST_BYTES_L 0x310 @@ -22676,6 +32148,10 @@ #define V_BMC_VLD(x) ((x) << S_BMC_VLD) #define F_BMC_VLD V_BMC_VLD(1U) +#define S_MATCHALL 18 +#define V_MATCHALL(x) ((x) << S_MATCHALL) +#define F_MATCHALL V_MATCHALL(1U) + #define A_MPS_PF_STAT_TX_PF_MCAST_BYTES_H 0x314 #define A_MPS_PORT_CLS_BMC_VLAN 0x314 @@ -22739,7 +32215,24 @@ #define G_DMAC_TCAM_SEL(x) (((x) >> S_DMAC_TCAM_SEL) & M_DMAC_TCAM_SEL) #define A_MPS_PF_STAT_TX_PF_MCAST_FRAMES_H 0x31c +#define A_MPS_PORT_CLS_NCSI_ETH_TYPE 0x31c + +#define S_ETHTYPE2 0 +#define M_ETHTYPE2 0xffffU +#define V_ETHTYPE2(x) ((x) << S_ETHTYPE2) +#define G_ETHTYPE2(x) (((x) >> S_ETHTYPE2) & M_ETHTYPE2) + #define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_L 0x320 +#define A_MPS_PORT_CLS_NCSI_ETH_TYPE_EN 0x320 + +#define S_EN1 1 +#define V_EN1(x) ((x) << S_EN1) +#define F_EN1 V_EN1(1U) + +#define S_EN2 0 +#define V_EN2(x) ((x) << S_EN2) +#define F_EN2 V_EN2(1U) + #define A_MPS_PF_STAT_TX_PF_UCAST_BYTES_H 0x324 #define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_L 0x328 #define A_MPS_PF_STAT_TX_PF_UCAST_FRAMES_H 0x32c @@ -22915,6 +32408,19 @@ #define V_LPBKCRDTCTRL(x) ((x) << S_LPBKCRDTCTRL) #define F_LPBKCRDTCTRL V_LPBKCRDTCTRL(1U) +#define S_TX_PORT_STATS_MODE 8 +#define V_TX_PORT_STATS_MODE(x) ((x) << S_TX_PORT_STATS_MODE) +#define F_TX_PORT_STATS_MODE V_TX_PORT_STATS_MODE(1U) + +#define S_T5MODE 7 +#define V_T5MODE(x) ((x) << S_T5MODE) +#define F_T5MODE V_T5MODE(1U) + +#define S_SPEEDMODE 5 +#define M_SPEEDMODE 0x3U +#define V_SPEEDMODE(x) ((x) << S_SPEEDMODE) +#define G_SPEEDMODE(x) (((x) >> S_SPEEDMODE) & M_SPEEDMODE) + #define A_MPS_INT_ENABLE 0x9004 #define S_STATINTENB 5 @@ -23140,6 +32646,33 @@ #define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH14 0x90f4 #define A_MPS_TX_PAUSE_RETRANS_BUF_GRP_TH15 0x90f8 #define A_MPS_BUILD_REVISION 0x90fc +#define A_MPS_VF_TX_CTL_159_128 0x9100 +#define A_MPS_VF_TX_CTL_191_160 0x9104 +#define A_MPS_VF_TX_CTL_223_192 0x9108 +#define A_MPS_VF_TX_CTL_255_224 0x910c +#define A_MPS_VF_RX_CTL_159_128 
0x9110 +#define A_MPS_VF_RX_CTL_191_160 0x9114 +#define A_MPS_VF_RX_CTL_223_192 0x9118 +#define A_MPS_VF_RX_CTL_255_224 0x911c +#define A_MPS_FPGA_BIST_CFG_P0 0x9120 + +#define S_ADDRMASK 16 +#define M_ADDRMASK 0xffffU +#define V_ADDRMASK(x) ((x) << S_ADDRMASK) +#define G_ADDRMASK(x) (((x) >> S_ADDRMASK) & M_ADDRMASK) + +#define S_T6_BASEADDR 0 +#define M_T6_BASEADDR 0xffffU +#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR) +#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR) + +#define A_MPS_FPGA_BIST_CFG_P1 0x9124 + +#define S_T6_BASEADDR 0 +#define M_T6_BASEADDR 0xffffU +#define V_T6_BASEADDR(x) ((x) << S_T6_BASEADDR) +#define G_T6_BASEADDR(x) (((x) >> S_T6_BASEADDR) & M_T6_BASEADDR) + #define A_MPS_TX_PRTY_SEL 0x9400 #define S_CH4_PRTY 20 @@ -23215,6 +32748,7 @@ #define G_TPFIFO(x) (((x) >> S_TPFIFO) & M_TPFIFO) #define A_MPS_TX_INT_CAUSE 0x9408 +#define A_MPS_TX_NCSI2MPS_CNT 0x940c #define A_MPS_TX_PERR_ENABLE 0x9410 #define A_MPS_TX_PERR_INJECT 0x9414 @@ -23369,6 +32903,50 @@ #define V_DATACH0(x) ((x) << S_DATACH0) #define G_DATACH0(x) (((x) >> S_DATACH0) & M_DATACH0) +#define S_T5_SIZECH1 26 +#define M_T5_SIZECH1 0xfU +#define V_T5_SIZECH1(x) ((x) << S_T5_SIZECH1) +#define G_T5_SIZECH1(x) (((x) >> S_T5_SIZECH1) & M_T5_SIZECH1) + +#define S_T5_ERRCH1 25 +#define V_T5_ERRCH1(x) ((x) << S_T5_ERRCH1) +#define F_T5_ERRCH1 V_T5_ERRCH1(1U) + +#define S_T5_FULLCH1 24 +#define V_T5_FULLCH1(x) ((x) << S_T5_FULLCH1) +#define F_T5_FULLCH1 V_T5_FULLCH1(1U) + +#define S_T5_VALIDCH1 23 +#define V_T5_VALIDCH1(x) ((x) << S_T5_VALIDCH1) +#define F_T5_VALIDCH1 V_T5_VALIDCH1(1U) + +#define S_T5_DATACH1 16 +#define M_T5_DATACH1 0x7fU +#define V_T5_DATACH1(x) ((x) << S_T5_DATACH1) +#define G_T5_DATACH1(x) (((x) >> S_T5_DATACH1) & M_T5_DATACH1) + +#define S_T5_SIZECH0 10 +#define M_T5_SIZECH0 0xfU +#define V_T5_SIZECH0(x) ((x) << S_T5_SIZECH0) +#define G_T5_SIZECH0(x) (((x) >> S_T5_SIZECH0) & M_T5_SIZECH0) + +#define S_T5_ERRCH0 9 +#define V_T5_ERRCH0(x) ((x) << 
S_T5_ERRCH0) +#define F_T5_ERRCH0 V_T5_ERRCH0(1U) + +#define S_T5_FULLCH0 8 +#define V_T5_FULLCH0(x) ((x) << S_T5_FULLCH0) +#define F_T5_FULLCH0 V_T5_FULLCH0(1U) + +#define S_T5_VALIDCH0 7 +#define V_T5_VALIDCH0(x) ((x) << S_T5_VALIDCH0) +#define F_T5_VALIDCH0 V_T5_VALIDCH0(1U) + +#define S_T5_DATACH0 0 +#define M_T5_DATACH0 0x7fU +#define V_T5_DATACH0(x) ((x) << S_T5_DATACH0) +#define G_T5_DATACH0(x) (((x) >> S_T5_DATACH0) & M_T5_DATACH0) + #define A_MPS_TX_DEBUG_REG_TP2TX_32 0x9448 #define S_SOPCH3 31 @@ -23431,6 +33009,50 @@ #define V_DATACH2(x) ((x) << S_DATACH2) #define G_DATACH2(x) (((x) >> S_DATACH2) & M_DATACH2) +#define S_T5_SIZECH3 26 +#define M_T5_SIZECH3 0xfU +#define V_T5_SIZECH3(x) ((x) << S_T5_SIZECH3) +#define G_T5_SIZECH3(x) (((x) >> S_T5_SIZECH3) & M_T5_SIZECH3) + +#define S_T5_ERRCH3 25 +#define V_T5_ERRCH3(x) ((x) << S_T5_ERRCH3) +#define F_T5_ERRCH3 V_T5_ERRCH3(1U) + +#define S_T5_FULLCH3 24 +#define V_T5_FULLCH3(x) ((x) << S_T5_FULLCH3) +#define F_T5_FULLCH3 V_T5_FULLCH3(1U) + +#define S_T5_VALIDCH3 23 +#define V_T5_VALIDCH3(x) ((x) << S_T5_VALIDCH3) +#define F_T5_VALIDCH3 V_T5_VALIDCH3(1U) + +#define S_T5_DATACH3 16 +#define M_T5_DATACH3 0x7fU +#define V_T5_DATACH3(x) ((x) << S_T5_DATACH3) +#define G_T5_DATACH3(x) (((x) >> S_T5_DATACH3) & M_T5_DATACH3) + +#define S_T5_SIZECH2 10 +#define M_T5_SIZECH2 0xfU +#define V_T5_SIZECH2(x) ((x) << S_T5_SIZECH2) +#define G_T5_SIZECH2(x) (((x) >> S_T5_SIZECH2) & M_T5_SIZECH2) + +#define S_T5_ERRCH2 9 +#define V_T5_ERRCH2(x) ((x) << S_T5_ERRCH2) +#define F_T5_ERRCH2 V_T5_ERRCH2(1U) + +#define S_T5_FULLCH2 8 +#define V_T5_FULLCH2(x) ((x) << S_T5_FULLCH2) +#define F_T5_FULLCH2 V_T5_FULLCH2(1U) + +#define S_T5_VALIDCH2 7 +#define V_T5_VALIDCH2(x) ((x) << S_T5_VALIDCH2) +#define F_T5_VALIDCH2 V_T5_VALIDCH2(1U) + +#define S_T5_DATACH2 0 +#define M_T5_DATACH2 0x7fU +#define V_T5_DATACH2(x) ((x) << S_T5_DATACH2) +#define G_T5_DATACH2(x) (((x) >> S_T5_DATACH2) & M_T5_DATACH2) + #define 
A_MPS_TX_DEBUG_REG_TX2MAC_10 0x944c #define S_SOPPT1 31 @@ -23493,6 +33115,50 @@ #define V_DATAPT0(x) ((x) << S_DATAPT0) #define G_DATAPT0(x) (((x) >> S_DATAPT0) & M_DATAPT0) +#define S_T5_SIZEPT1 26 +#define M_T5_SIZEPT1 0xfU +#define V_T5_SIZEPT1(x) ((x) << S_T5_SIZEPT1) +#define G_T5_SIZEPT1(x) (((x) >> S_T5_SIZEPT1) & M_T5_SIZEPT1) + +#define S_T5_ERRPT1 25 +#define V_T5_ERRPT1(x) ((x) << S_T5_ERRPT1) +#define F_T5_ERRPT1 V_T5_ERRPT1(1U) + +#define S_T5_FULLPT1 24 +#define V_T5_FULLPT1(x) ((x) << S_T5_FULLPT1) +#define F_T5_FULLPT1 V_T5_FULLPT1(1U) + +#define S_T5_VALIDPT1 23 +#define V_T5_VALIDPT1(x) ((x) << S_T5_VALIDPT1) +#define F_T5_VALIDPT1 V_T5_VALIDPT1(1U) + +#define S_T5_DATAPT1 16 +#define M_T5_DATAPT1 0x7fU +#define V_T5_DATAPT1(x) ((x) << S_T5_DATAPT1) +#define G_T5_DATAPT1(x) (((x) >> S_T5_DATAPT1) & M_T5_DATAPT1) + +#define S_T5_SIZEPT0 10 +#define M_T5_SIZEPT0 0xfU +#define V_T5_SIZEPT0(x) ((x) << S_T5_SIZEPT0) +#define G_T5_SIZEPT0(x) (((x) >> S_T5_SIZEPT0) & M_T5_SIZEPT0) + +#define S_T5_ERRPT0 9 +#define V_T5_ERRPT0(x) ((x) << S_T5_ERRPT0) +#define F_T5_ERRPT0 V_T5_ERRPT0(1U) + +#define S_T5_FULLPT0 8 +#define V_T5_FULLPT0(x) ((x) << S_T5_FULLPT0) +#define F_T5_FULLPT0 V_T5_FULLPT0(1U) + +#define S_T5_VALIDPT0 7 +#define V_T5_VALIDPT0(x) ((x) << S_T5_VALIDPT0) +#define F_T5_VALIDPT0 V_T5_VALIDPT0(1U) + +#define S_T5_DATAPT0 0 +#define M_T5_DATAPT0 0x7fU +#define V_T5_DATAPT0(x) ((x) << S_T5_DATAPT0) +#define G_T5_DATAPT0(x) (((x) >> S_T5_DATAPT0) & M_T5_DATAPT0) + #define A_MPS_TX_DEBUG_REG_TX2MAC_32 0x9450 #define S_SOPPT3 31 @@ -23555,6 +33221,50 @@ #define V_DATAPT2(x) ((x) << S_DATAPT2) #define G_DATAPT2(x) (((x) >> S_DATAPT2) & M_DATAPT2) +#define S_T5_SIZEPT3 26 +#define M_T5_SIZEPT3 0xfU +#define V_T5_SIZEPT3(x) ((x) << S_T5_SIZEPT3) +#define G_T5_SIZEPT3(x) (((x) >> S_T5_SIZEPT3) & M_T5_SIZEPT3) + +#define S_T5_ERRPT3 25 +#define V_T5_ERRPT3(x) ((x) << S_T5_ERRPT3) +#define F_T5_ERRPT3 V_T5_ERRPT3(1U) + +#define S_T5_FULLPT3 24 
+#define V_T5_FULLPT3(x) ((x) << S_T5_FULLPT3) +#define F_T5_FULLPT3 V_T5_FULLPT3(1U) + +#define S_T5_VALIDPT3 23 +#define V_T5_VALIDPT3(x) ((x) << S_T5_VALIDPT3) +#define F_T5_VALIDPT3 V_T5_VALIDPT3(1U) + +#define S_T5_DATAPT3 16 +#define M_T5_DATAPT3 0x7fU +#define V_T5_DATAPT3(x) ((x) << S_T5_DATAPT3) +#define G_T5_DATAPT3(x) (((x) >> S_T5_DATAPT3) & M_T5_DATAPT3) + +#define S_T5_SIZEPT2 10 +#define M_T5_SIZEPT2 0xfU +#define V_T5_SIZEPT2(x) ((x) << S_T5_SIZEPT2) +#define G_T5_SIZEPT2(x) (((x) >> S_T5_SIZEPT2) & M_T5_SIZEPT2) + +#define S_T5_ERRPT2 9 +#define V_T5_ERRPT2(x) ((x) << S_T5_ERRPT2) +#define F_T5_ERRPT2 V_T5_ERRPT2(1U) + +#define S_T5_FULLPT2 8 +#define V_T5_FULLPT2(x) ((x) << S_T5_FULLPT2) +#define F_T5_FULLPT2 V_T5_FULLPT2(1U) + +#define S_T5_VALIDPT2 7 +#define V_T5_VALIDPT2(x) ((x) << S_T5_VALIDPT2) +#define F_T5_VALIDPT2 V_T5_VALIDPT2(1U) + +#define S_T5_DATAPT2 0 +#define M_T5_DATAPT2 0x7fU +#define V_T5_DATAPT2(x) ((x) << S_T5_DATAPT2) +#define G_T5_DATAPT2(x) (((x) >> S_T5_DATAPT2) & M_T5_DATAPT2) + #define A_MPS_TX_SGE_CH_PAUSE_IGNR 0x9454 #define S_SGEPAUSEIGNR 0 @@ -24060,6 +33770,14 @@ #define A_MPS_STAT_STOP_UPD_RX_VF_32_63 0x96ec #define A_MPS_STAT_STOP_UPD_RX_VF_64_95 0x96f0 #define A_MPS_STAT_STOP_UPD_RX_VF_96_127 0x96f4 +#define A_MPS_STAT_STOP_UPD_RX_VF_128_159 0x96f8 +#define A_MPS_STAT_STOP_UPD_RX_VF_160_191 0x96fc +#define A_MPS_STAT_STOP_UPD_RX_VF_192_223 0x9700 +#define A_MPS_STAT_STOP_UPD_RX_VF_224_255 0x9704 +#define A_MPS_STAT_STOP_UPD_TX_VF_128_159 0x9710 +#define A_MPS_STAT_STOP_UPD_TX_VF_160_191 0x9714 +#define A_MPS_STAT_STOP_UPD_TX_VF_192_223 0x9718 +#define A_MPS_STAT_STOP_UPD_TX_VF_224_255 0x971c #define A_MPS_TRC_CFG 0x9800 #define S_TRCFIFOEMPTY 4 @@ -24282,9 +34000,164 @@ #define V_VFFILTDATA(x) ((x) << S_VFFILTDATA) #define G_VFFILTDATA(x) (((x) >> S_VFFILTDATA) & M_VFFILTDATA) +#define S_T6_TRCMPS2TP_MACONLY 22 +#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY) +#define F_T6_TRCMPS2TP_MACONLY 
V_T6_TRCMPS2TP_MACONLY(1U) + +#define S_T6_TRCALLMPS2TP 21 +#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP) +#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U) + +#define S_T6_TRCALLTP2MPS 20 +#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS) +#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U) + +#define S_T6_TRCALLVF 19 +#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF) +#define F_T6_TRCALLVF V_T6_TRCALLVF(1U) + +#define S_T6_TRC_OFLD_EN 18 +#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN) +#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U) + +#define S_T6_VFFILTEN 17 +#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN) +#define F_T6_VFFILTEN V_T6_VFFILTEN(1U) + +#define S_T6_VFFILTMASK 9 +#define M_T6_VFFILTMASK 0xffU +#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK) +#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK) + +#define S_T6_VFFILTVALID 8 +#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID) +#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U) + +#define S_T6_VFFILTDATA 0 +#define M_T6_VFFILTDATA 0xffU +#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA) +#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA) + #define A_MPS_TRC_VF_OFF_FILTER_1 0xa014 + +#define S_T6_TRCMPS2TP_MACONLY 22 +#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY) +#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U) + +#define S_T6_TRCALLMPS2TP 21 +#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP) +#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U) + +#define S_T6_TRCALLTP2MPS 20 +#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS) +#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U) + +#define S_T6_TRCALLVF 19 +#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF) +#define F_T6_TRCALLVF V_T6_TRCALLVF(1U) + +#define S_T6_TRC_OFLD_EN 18 +#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN) +#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U) + +#define S_T6_VFFILTEN 17 +#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN) +#define 
F_T6_VFFILTEN V_T6_VFFILTEN(1U) + +#define S_T6_VFFILTMASK 9 +#define M_T6_VFFILTMASK 0xffU +#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK) +#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK) + +#define S_T6_VFFILTVALID 8 +#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID) +#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U) + +#define S_T6_VFFILTDATA 0 +#define M_T6_VFFILTDATA 0xffU +#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA) +#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA) + #define A_MPS_TRC_VF_OFF_FILTER_2 0xa018 + +#define S_T6_TRCMPS2TP_MACONLY 22 +#define V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY) +#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U) + +#define S_T6_TRCALLMPS2TP 21 +#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP) +#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U) + +#define S_T6_TRCALLTP2MPS 20 +#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS) +#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U) + +#define S_T6_TRCALLVF 19 +#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF) +#define F_T6_TRCALLVF V_T6_TRCALLVF(1U) + +#define S_T6_TRC_OFLD_EN 18 +#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN) +#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U) + +#define S_T6_VFFILTEN 17 +#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN) +#define F_T6_VFFILTEN V_T6_VFFILTEN(1U) + +#define S_T6_VFFILTMASK 9 +#define M_T6_VFFILTMASK 0xffU +#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK) +#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK) + +#define S_T6_VFFILTVALID 8 +#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID) +#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U) + +#define S_T6_VFFILTDATA 0 +#define M_T6_VFFILTDATA 0xffU +#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA) +#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA) + #define A_MPS_TRC_VF_OFF_FILTER_3 0xa01c + +#define S_T6_TRCMPS2TP_MACONLY 22 +#define 
V_T6_TRCMPS2TP_MACONLY(x) ((x) << S_T6_TRCMPS2TP_MACONLY) +#define F_T6_TRCMPS2TP_MACONLY V_T6_TRCMPS2TP_MACONLY(1U) + +#define S_T6_TRCALLMPS2TP 21 +#define V_T6_TRCALLMPS2TP(x) ((x) << S_T6_TRCALLMPS2TP) +#define F_T6_TRCALLMPS2TP V_T6_TRCALLMPS2TP(1U) + +#define S_T6_TRCALLTP2MPS 20 +#define V_T6_TRCALLTP2MPS(x) ((x) << S_T6_TRCALLTP2MPS) +#define F_T6_TRCALLTP2MPS V_T6_TRCALLTP2MPS(1U) + +#define S_T6_TRCALLVF 19 +#define V_T6_TRCALLVF(x) ((x) << S_T6_TRCALLVF) +#define F_T6_TRCALLVF V_T6_TRCALLVF(1U) + +#define S_T6_TRC_OFLD_EN 18 +#define V_T6_TRC_OFLD_EN(x) ((x) << S_T6_TRC_OFLD_EN) +#define F_T6_TRC_OFLD_EN V_T6_TRC_OFLD_EN(1U) + +#define S_T6_VFFILTEN 17 +#define V_T6_VFFILTEN(x) ((x) << S_T6_VFFILTEN) +#define F_T6_VFFILTEN V_T6_VFFILTEN(1U) + +#define S_T6_VFFILTMASK 9 +#define M_T6_VFFILTMASK 0xffU +#define V_T6_VFFILTMASK(x) ((x) << S_T6_VFFILTMASK) +#define G_T6_VFFILTMASK(x) (((x) >> S_T6_VFFILTMASK) & M_T6_VFFILTMASK) + +#define S_T6_VFFILTVALID 8 +#define V_T6_VFFILTVALID(x) ((x) << S_T6_VFFILTVALID) +#define F_T6_VFFILTVALID V_T6_VFFILTVALID(1U) + +#define S_T6_VFFILTDATA 0 +#define M_T6_VFFILTDATA 0xffU +#define V_T6_VFFILTDATA(x) ((x) << S_T6_VFFILTDATA) +#define G_T6_VFFILTDATA(x) (((x) >> S_T6_VFFILTDATA) & M_T6_VFFILTDATA) + #define A_MPS_TRC_CGEN 0xa020 #define S_MPSTRCCGEN 0 @@ -24314,6 +34187,18 @@ #define V_VLANCLSEN(x) ((x) << S_VLANCLSEN) #define F_VLANCLSEN V_VLANCLSEN(1U) +#define S_VLANCLSEN_IN 7 +#define V_VLANCLSEN_IN(x) ((x) << S_VLANCLSEN_IN) +#define F_VLANCLSEN_IN V_VLANCLSEN_IN(1U) + +#define S_DISTCAMPARCHK 6 +#define V_DISTCAMPARCHK(x) ((x) << S_DISTCAMPARCHK) +#define F_DISTCAMPARCHK V_DISTCAMPARCHK(1U) + +#define S_VLANLKPEN 5 +#define V_VLANLKPEN(x) ((x) << S_VLANLKPEN) +#define F_VLANLKPEN V_VLANLKPEN(1U) + #define A_MPS_CLS_ARB_WEIGHT 0xd004 #define S_PLWEIGHT 16 @@ -24331,6 +34216,8 @@ #define V_LPBKWEIGHT(x) ((x) << S_LPBKWEIGHT) #define G_LPBKWEIGHT(x) (((x) >> S_LPBKWEIGHT) & M_LPBKWEIGHT) +#define 
A_MPS_CLS_NCSI_ETH_TYPE 0xd008 +#define A_MPS_CLS_NCSI_ETH_TYPE_EN 0xd00c #define A_MPS_CLS_BMC_MAC_ADDR_L 0xd010 #define A_MPS_CLS_BMC_MAC_ADDR_H 0xd014 #define A_MPS_CLS_BMC_VLAN 0xd018 @@ -24399,6 +34286,30 @@ #define V_CLS_MATCH(x) ((x) << S_CLS_MATCH) #define G_CLS_MATCH(x) (((x) >> S_CLS_MATCH) & M_CLS_MATCH) +#define S_CLS_SPARE 28 +#define M_CLS_SPARE 0xfU +#define V_CLS_SPARE(x) ((x) << S_CLS_SPARE) +#define G_CLS_SPARE(x) (((x) >> S_CLS_SPARE) & M_CLS_SPARE) + +#define S_T6_CLS_PRIORITY 25 +#define M_T6_CLS_PRIORITY 0x7U +#define V_T6_CLS_PRIORITY(x) ((x) << S_T6_CLS_PRIORITY) +#define G_T6_CLS_PRIORITY(x) (((x) >> S_T6_CLS_PRIORITY) & M_T6_CLS_PRIORITY) + +#define S_T6_CLS_REPLICATE 24 +#define V_T6_CLS_REPLICATE(x) ((x) << S_T6_CLS_REPLICATE) +#define F_T6_CLS_REPLICATE V_T6_CLS_REPLICATE(1U) + +#define S_T6_CLS_INDEX 15 +#define M_T6_CLS_INDEX 0x1ffU +#define V_T6_CLS_INDEX(x) ((x) << S_T6_CLS_INDEX) +#define G_T6_CLS_INDEX(x) (((x) >> S_T6_CLS_INDEX) & M_T6_CLS_INDEX) + +#define S_T6_CLS_VF 7 +#define M_T6_CLS_VF 0xffU +#define V_T6_CLS_VF(x) ((x) << S_T6_CLS_VF) +#define G_T6_CLS_VF(x) (((x) >> S_T6_CLS_VF) & M_T6_CLS_VF) + #define A_MPS_CLS_PL_TEST_CTL 0xd038 #define S_PLTESTCTL 0 @@ -24411,6 +34322,108 @@ #define V_PRTBMCCTL(x) ((x) << S_PRTBMCCTL) #define F_PRTBMCCTL V_PRTBMCCTL(1U) +#define A_MPS_CLS_MATCH_CNT_TCAM 0xd100 +#define A_MPS_CLS_MATCH_CNT_HASH 0xd104 +#define A_MPS_CLS_MATCH_CNT_BCAST 0xd108 +#define A_MPS_CLS_MATCH_CNT_BMC 0xd10c +#define A_MPS_CLS_MATCH_CNT_PROM 0xd110 +#define A_MPS_CLS_MATCH_CNT_HPROM 0xd114 +#define A_MPS_CLS_MISS_CNT 0xd118 +#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_L 0xd200 +#define A_MPS_CLS_REQUEST_TRACE_MAC_DA_H 0xd204 + +#define S_CLSTRCMACDAHI 0 +#define M_CLSTRCMACDAHI 0xffffU +#define V_CLSTRCMACDAHI(x) ((x) << S_CLSTRCMACDAHI) +#define G_CLSTRCMACDAHI(x) (((x) >> S_CLSTRCMACDAHI) & M_CLSTRCMACDAHI) + +#define A_MPS_CLS_REQUEST_TRACE_MAC_SA_L 0xd208 +#define A_MPS_CLS_REQUEST_TRACE_MAC_SA_H 0xd20c + 
+#define S_CLSTRCMACSAHI 0 +#define M_CLSTRCMACSAHI 0xffffU +#define V_CLSTRCMACSAHI(x) ((x) << S_CLSTRCMACSAHI) +#define G_CLSTRCMACSAHI(x) (((x) >> S_CLSTRCMACSAHI) & M_CLSTRCMACSAHI) + +#define A_MPS_CLS_REQUEST_TRACE_PORT_VLAN 0xd210 + +#define S_CLSTRCVLANVLD 31 +#define V_CLSTRCVLANVLD(x) ((x) << S_CLSTRCVLANVLD) +#define F_CLSTRCVLANVLD V_CLSTRCVLANVLD(1U) + +#define S_CLSTRCVLANID 16 +#define M_CLSTRCVLANID 0xfffU +#define V_CLSTRCVLANID(x) ((x) << S_CLSTRCVLANID) +#define G_CLSTRCVLANID(x) (((x) >> S_CLSTRCVLANID) & M_CLSTRCVLANID) + +#define S_CLSTRCREQPORT 0 +#define M_CLSTRCREQPORT 0xfU +#define V_CLSTRCREQPORT(x) ((x) << S_CLSTRCREQPORT) +#define G_CLSTRCREQPORT(x) (((x) >> S_CLSTRCREQPORT) & M_CLSTRCREQPORT) + +#define A_MPS_CLS_REQUEST_TRACE_ENCAP 0xd214 + +#define S_CLSTRCLKPTYPE 31 +#define V_CLSTRCLKPTYPE(x) ((x) << S_CLSTRCLKPTYPE) +#define F_CLSTRCLKPTYPE V_CLSTRCLKPTYPE(1U) + +#define S_CLSTRCDIPHIT 30 +#define V_CLSTRCDIPHIT(x) ((x) << S_CLSTRCDIPHIT) +#define F_CLSTRCDIPHIT V_CLSTRCDIPHIT(1U) + +#define S_CLSTRCVNI 0 +#define M_CLSTRCVNI 0xffffffU +#define V_CLSTRCVNI(x) ((x) << S_CLSTRCVNI) +#define G_CLSTRCVNI(x) (((x) >> S_CLSTRCVNI) & M_CLSTRCVNI) + +#define A_MPS_CLS_RESULT_TRACE 0xd300 + +#define S_CLSTRCPORTNUM 31 +#define V_CLSTRCPORTNUM(x) ((x) << S_CLSTRCPORTNUM) +#define F_CLSTRCPORTNUM V_CLSTRCPORTNUM(1U) + +#define S_CLSTRCPRIORITY 28 +#define M_CLSTRCPRIORITY 0x7U +#define V_CLSTRCPRIORITY(x) ((x) << S_CLSTRCPRIORITY) +#define G_CLSTRCPRIORITY(x) (((x) >> S_CLSTRCPRIORITY) & M_CLSTRCPRIORITY) + +#define S_CLSTRCMULTILISTEN 27 +#define V_CLSTRCMULTILISTEN(x) ((x) << S_CLSTRCMULTILISTEN) +#define F_CLSTRCMULTILISTEN V_CLSTRCMULTILISTEN(1U) + +#define S_CLSTRCREPLICATE 26 +#define V_CLSTRCREPLICATE(x) ((x) << S_CLSTRCREPLICATE) +#define F_CLSTRCREPLICATE V_CLSTRCREPLICATE(1U) + +#define S_CLSTRCPORTMAP 24 +#define M_CLSTRCPORTMAP 0x3U +#define V_CLSTRCPORTMAP(x) ((x) << S_CLSTRCPORTMAP) +#define G_CLSTRCPORTMAP(x) (((x) >> 
S_CLSTRCPORTMAP) & M_CLSTRCPORTMAP) + +#define S_CLSTRCMATCH 21 +#define M_CLSTRCMATCH 0x7U +#define V_CLSTRCMATCH(x) ((x) << S_CLSTRCMATCH) +#define G_CLSTRCMATCH(x) (((x) >> S_CLSTRCMATCH) & M_CLSTRCMATCH) + +#define S_CLSTRCINDEX 12 +#define M_CLSTRCINDEX 0x1ffU +#define V_CLSTRCINDEX(x) ((x) << S_CLSTRCINDEX) +#define G_CLSTRCINDEX(x) (((x) >> S_CLSTRCINDEX) & M_CLSTRCINDEX) + +#define S_CLSTRCVF_VLD 11 +#define V_CLSTRCVF_VLD(x) ((x) << S_CLSTRCVF_VLD) +#define F_CLSTRCVF_VLD V_CLSTRCVF_VLD(1U) + +#define S_CLSTRCPF 3 +#define M_CLSTRCPF 0xffU +#define V_CLSTRCPF(x) ((x) << S_CLSTRCPF) +#define G_CLSTRCPF(x) (((x) >> S_CLSTRCPF) & M_CLSTRCPF) + +#define S_CLSTRCVF 0 +#define M_CLSTRCVF 0x7U +#define V_CLSTRCVF(x) ((x) << S_CLSTRCVF) +#define G_CLSTRCVF(x) (((x) >> S_CLSTRCVF) & M_CLSTRCVF) + #define A_MPS_CLS_VLAN_TABLE 0xdfc0 #define S_VLAN_MASK 16 @@ -24470,6 +34483,73 @@ #define F_SRAM_VLD V_SRAM_VLD(1U) #define A_MPS_T5_CLS_SRAM_L 0xe000 + +#define S_T6_DISENCAPOUTERRPLCT 31 +#define V_T6_DISENCAPOUTERRPLCT(x) ((x) << S_T6_DISENCAPOUTERRPLCT) +#define F_T6_DISENCAPOUTERRPLCT V_T6_DISENCAPOUTERRPLCT(1U) + +#define S_T6_DISENCAP 30 +#define V_T6_DISENCAP(x) ((x) << S_T6_DISENCAP) +#define F_T6_DISENCAP V_T6_DISENCAP(1U) + +#define S_T6_MULTILISTEN3 29 +#define V_T6_MULTILISTEN3(x) ((x) << S_T6_MULTILISTEN3) +#define F_T6_MULTILISTEN3 V_T6_MULTILISTEN3(1U) + +#define S_T6_MULTILISTEN2 28 +#define V_T6_MULTILISTEN2(x) ((x) << S_T6_MULTILISTEN2) +#define F_T6_MULTILISTEN2 V_T6_MULTILISTEN2(1U) + +#define S_T6_MULTILISTEN1 27 +#define V_T6_MULTILISTEN1(x) ((x) << S_T6_MULTILISTEN1) +#define F_T6_MULTILISTEN1 V_T6_MULTILISTEN1(1U) + +#define S_T6_MULTILISTEN0 26 +#define V_T6_MULTILISTEN0(x) ((x) << S_T6_MULTILISTEN0) +#define F_T6_MULTILISTEN0 V_T6_MULTILISTEN0(1U) + +#define S_T6_SRAM_PRIO3 23 +#define M_T6_SRAM_PRIO3 0x7U +#define V_T6_SRAM_PRIO3(x) ((x) << S_T6_SRAM_PRIO3) +#define G_T6_SRAM_PRIO3(x) (((x) >> S_T6_SRAM_PRIO3) & M_T6_SRAM_PRIO3) + +#define 
S_T6_SRAM_PRIO2 20 +#define M_T6_SRAM_PRIO2 0x7U +#define V_T6_SRAM_PRIO2(x) ((x) << S_T6_SRAM_PRIO2) +#define G_T6_SRAM_PRIO2(x) (((x) >> S_T6_SRAM_PRIO2) & M_T6_SRAM_PRIO2) + +#define S_T6_SRAM_PRIO1 17 +#define M_T6_SRAM_PRIO1 0x7U +#define V_T6_SRAM_PRIO1(x) ((x) << S_T6_SRAM_PRIO1) +#define G_T6_SRAM_PRIO1(x) (((x) >> S_T6_SRAM_PRIO1) & M_T6_SRAM_PRIO1) + +#define S_T6_SRAM_PRIO0 14 +#define M_T6_SRAM_PRIO0 0x7U +#define V_T6_SRAM_PRIO0(x) ((x) << S_T6_SRAM_PRIO0) +#define G_T6_SRAM_PRIO0(x) (((x) >> S_T6_SRAM_PRIO0) & M_T6_SRAM_PRIO0) + +#define S_T6_SRAM_VLD 13 +#define V_T6_SRAM_VLD(x) ((x) << S_T6_SRAM_VLD) +#define F_T6_SRAM_VLD V_T6_SRAM_VLD(1U) + +#define S_T6_REPLICATE 12 +#define V_T6_REPLICATE(x) ((x) << S_T6_REPLICATE) +#define F_T6_REPLICATE V_T6_REPLICATE(1U) + +#define S_T6_PF 9 +#define M_T6_PF 0x7U +#define V_T6_PF(x) ((x) << S_T6_PF) +#define G_T6_PF(x) (((x) >> S_T6_PF) & M_T6_PF) + +#define S_T6_VF_VALID 8 +#define V_T6_VF_VALID(x) ((x) << S_T6_VF_VALID) +#define F_T6_VF_VALID V_T6_VF_VALID(1U) + +#define S_T6_VF 0 +#define M_T6_VF 0xffU +#define V_T6_VF(x) ((x) << S_T6_VF) +#define G_T6_VF(x) (((x) >> S_T6_VF) & M_T6_VF) + #define A_MPS_CLS_SRAM_H 0xe004 #define S_MACPARITY1 9 @@ -24491,7 +34571,13 @@ #define G_PORTMAP(x) (((x) >> S_PORTMAP) & M_PORTMAP) #define A_MPS_T5_CLS_SRAM_H 0xe004 + +#define S_MACPARITY2 10 +#define V_MACPARITY2(x) ((x) << S_MACPARITY2) +#define F_MACPARITY2 V_MACPARITY2(1U) + #define A_MPS_CLS_TCAM_Y_L 0xf000 +#define A_MPS_CLS_TCAM_DATA0 0xf000 #define A_MPS_CLS_TCAM_Y_H 0xf004 #define S_TCAMYH 0 @@ -24499,7 +34585,65 @@ #define V_TCAMYH(x) ((x) << S_TCAMYH) #define G_TCAMYH(x) (((x) >> S_TCAMYH) & M_TCAMYH) +#define A_MPS_CLS_TCAM_DATA1 0xf004 + +#define S_VIDL 16 +#define M_VIDL 0xffffU +#define V_VIDL(x) ((x) << S_VIDL) +#define G_VIDL(x) (((x) >> S_VIDL) & M_VIDL) + +#define S_DMACH 0 +#define M_DMACH 0xffffU +#define V_DMACH(x) ((x) << S_DMACH) +#define G_DMACH(x) (((x) >> S_DMACH) & M_DMACH) + #define 
A_MPS_CLS_TCAM_X_L 0xf008 +#define A_MPS_CLS_TCAM_DATA2_CTL 0xf008 + +#define S_CTLCMDTYPE 31 +#define V_CTLCMDTYPE(x) ((x) << S_CTLCMDTYPE) +#define F_CTLCMDTYPE V_CTLCMDTYPE(1U) + +#define S_CTLREQID 30 +#define V_CTLREQID(x) ((x) << S_CTLREQID) +#define F_CTLREQID V_CTLREQID(1U) + +#define S_CTLTCAMSEL 25 +#define V_CTLTCAMSEL(x) ((x) << S_CTLTCAMSEL) +#define F_CTLTCAMSEL V_CTLTCAMSEL(1U) + +#define S_CTLTCAMINDEX 17 +#define M_CTLTCAMINDEX 0xffU +#define V_CTLTCAMINDEX(x) ((x) << S_CTLTCAMINDEX) +#define G_CTLTCAMINDEX(x) (((x) >> S_CTLTCAMINDEX) & M_CTLTCAMINDEX) + +#define S_CTLXYBITSEL 16 +#define V_CTLXYBITSEL(x) ((x) << S_CTLXYBITSEL) +#define F_CTLXYBITSEL V_CTLXYBITSEL(1U) + +#define S_DATAPORTNUM 12 +#define M_DATAPORTNUM 0xfU +#define V_DATAPORTNUM(x) ((x) << S_DATAPORTNUM) +#define G_DATAPORTNUM(x) (((x) >> S_DATAPORTNUM) & M_DATAPORTNUM) + +#define S_DATALKPTYPE 10 +#define M_DATALKPTYPE 0x3U +#define V_DATALKPTYPE(x) ((x) << S_DATALKPTYPE) +#define G_DATALKPTYPE(x) (((x) >> S_DATALKPTYPE) & M_DATALKPTYPE) + +#define S_DATADIPHIT 8 +#define V_DATADIPHIT(x) ((x) << S_DATADIPHIT) +#define F_DATADIPHIT V_DATADIPHIT(1U) + +#define S_DATAVIDH2 7 +#define V_DATAVIDH2(x) ((x) << S_DATAVIDH2) +#define F_DATAVIDH2 V_DATAVIDH2(1U) + +#define S_DATAVIDH1 0 +#define M_DATAVIDH1 0x7fU +#define V_DATAVIDH1(x) ((x) << S_DATAVIDH1) +#define G_DATAVIDH1(x) (((x) >> S_DATAVIDH1) & M_DATAVIDH1) + #define A_MPS_CLS_TCAM_X_H 0xf00c #define S_TCAMXH 0 @@ -24507,6 +34651,12 @@ #define V_TCAMXH(x) ((x) << S_TCAMXH) #define G_TCAMXH(x) (((x) >> S_TCAMXH) & M_TCAMXH) +#define A_MPS_CLS_TCAM_RDATA0_REQ_ID0 0xf010 +#define A_MPS_CLS_TCAM_RDATA1_REQ_ID0 0xf014 +#define A_MPS_CLS_TCAM_RDATA2_REQ_ID0 0xf018 +#define A_MPS_CLS_TCAM_RDATA0_REQ_ID1 0xf020 +#define A_MPS_CLS_TCAM_RDATA1_REQ_ID1 0xf024 +#define A_MPS_CLS_TCAM_RDATA2_REQ_ID1 0xf028 #define A_MPS_RX_CTL 0x11000 #define S_FILT_VLAN_SEL 17 @@ -24565,7 +34715,15 @@ #define V_CNT(x) ((x) << S_CNT) #define G_CNT(x) (((x) >> 
S_CNT) & M_CNT) +#define A_MPS_RX_FIFO_0_CTL 0x11008 + +#define S_DEST_SELECT 0 +#define M_DEST_SELECT 0xfU +#define V_DEST_SELECT(x) ((x) << S_DEST_SELECT) +#define G_DEST_SELECT(x) (((x) >> S_DEST_SELECT) & M_DEST_SELECT) + #define A_MPS_RX_PKT_FL 0x1100c +#define A_MPS_RX_FIFO_1_CTL 0x1100c #define A_MPS_RX_PG_RSV0 0x11010 #define S_CLR_INTR 31 @@ -24596,7 +34754,9 @@ #define V_T5_ALLOC(x) ((x) << S_T5_ALLOC) #define G_T5_ALLOC(x) (((x) >> S_T5_ALLOC) & M_T5_ALLOC) +#define A_MPS_RX_FIFO_2_CTL 0x11010 #define A_MPS_RX_PG_RSV1 0x11014 +#define A_MPS_RX_FIFO_3_CTL 0x11014 #define A_MPS_RX_PG_RSV2 0x11018 #define A_MPS_RX_PG_RSV3 0x1101c #define A_MPS_RX_PG_RSV4 0x11020 @@ -24671,6 +34831,11 @@ #define V_T5_TH(x) ((x) << S_T5_TH) #define G_T5_TH(x) (((x) >> S_T5_TH) & M_T5_TH) +#define S_T6_TH 0 +#define M_T6_TH 0x7ffU +#define V_T6_TH(x) ((x) << S_T6_TH) +#define G_T6_TH(x) (((x) >> S_T6_TH) & M_T6_TH) + #define A_MPS_RX_PG_HYST_BG1 0x1104c #define A_MPS_RX_PG_HYST_BG2 0x11050 #define A_MPS_RX_PG_HYST_BG3 0x11054 @@ -24817,8 +34982,22 @@ #define V_CDM0(x) ((x) << S_CDM0) #define F_CDM0 V_CDM0(1U) +#define S_T6_INT_ERR_INT 24 +#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT) +#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U) + #define A_MPS_RX_PERR_INT_ENABLE 0x11078 + +#define S_T6_INT_ERR_INT 24 +#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT) +#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U) + #define A_MPS_RX_PERR_ENABLE 0x1107c + +#define S_T6_INT_ERR_INT 24 +#define V_T6_INT_ERR_INT(x) ((x) << S_T6_INT_ERR_INT) +#define F_T6_INT_ERR_INT V_T6_INT_ERR_INT(1U) + #define A_MPS_RX_PERR_INJECT 0x11080 #define A_MPS_RX_FUNC_INT_CAUSE 0x11084 @@ -24903,6 +35082,12 @@ #define A_MPS_RX_PAUSE_GEN_TH_1 0x11090 #define A_MPS_RX_PAUSE_GEN_TH_2 0x11094 #define A_MPS_RX_PAUSE_GEN_TH_3 0x11098 +#define A_MPS_RX_REPL_CTL 0x11098 + +#define S_INDEX_SEL 0 +#define V_INDEX_SEL(x) ((x) << S_INDEX_SEL) +#define F_INDEX_SEL V_INDEX_SEL(1U) + #define A_MPS_RX_PPP_ATRB 0x1109c 
#define S_ETYPE 16 @@ -24938,7 +35123,9 @@ #define A_MPS_RX_PT_ARB1 0x110ac #define A_MPS_RX_PT_ARB2 0x110b0 #define A_MPS_RX_PT_ARB3 0x110b4 +#define A_T6_MPS_PF_OUT_EN 0x110b4 #define A_MPS_RX_PT_ARB4 0x110b8 +#define A_T6_MPS_BMC_MTU 0x110b8 #define A_MPS_PF_OUT_EN 0x110bc #define S_OUTEN 0 @@ -24946,6 +35133,7 @@ #define V_OUTEN(x) ((x) << S_OUTEN) #define G_OUTEN(x) (((x) >> S_OUTEN) & M_OUTEN) +#define A_T6_MPS_BMC_PKT_CNT 0x110bc #define A_MPS_BMC_MTU 0x110c0 #define S_MTU 0 @@ -24953,8 +35141,22 @@ #define V_MTU(x) ((x) << S_MTU) #define G_MTU(x) (((x) >> S_MTU) & M_MTU) +#define A_T6_MPS_BMC_BYTE_CNT 0x110c0 #define A_MPS_BMC_PKT_CNT 0x110c4 +#define A_T6_MPS_PFVF_ATRB_CTL 0x110c4 + +#define S_T6_PFVF 0 +#define M_T6_PFVF 0x1ffU +#define V_T6_PFVF(x) ((x) << S_T6_PFVF) +#define G_T6_PFVF(x) (((x) >> S_T6_PFVF) & M_T6_PFVF) + #define A_MPS_BMC_BYTE_CNT 0x110c8 +#define A_T6_MPS_PFVF_ATRB 0x110c8 + +#define S_FULL_FRAME_MODE 14 +#define V_FULL_FRAME_MODE(x) ((x) << S_FULL_FRAME_MODE) +#define F_FULL_FRAME_MODE V_FULL_FRAME_MODE(1U) + #define A_MPS_PFVF_ATRB_CTL 0x110cc #define S_RD_WRN 31 @@ -24966,6 +35168,7 @@ #define V_PFVF(x) ((x) << S_PFVF) #define G_PFVF(x) (((x) >> S_PFVF) & M_PFVF) +#define A_T6_MPS_PFVF_ATRB_FLTR0 0x110cc #define A_MPS_PFVF_ATRB 0x110d0 #define S_ATTR_PF 28 @@ -24985,6 +35188,7 @@ #define V_ATTR_MODE(x) ((x) << S_ATTR_MODE) #define F_ATTR_MODE V_ATTR_MODE(1U) +#define A_T6_MPS_PFVF_ATRB_FLTR1 0x110d0 #define A_MPS_PFVF_ATRB_FLTR0 0x110d4 #define S_VLAN_EN 16 @@ -24996,21 +35200,37 @@ #define V_VLAN_ID(x) ((x) << S_VLAN_ID) #define G_VLAN_ID(x) (((x) >> S_VLAN_ID) & M_VLAN_ID) +#define A_T6_MPS_PFVF_ATRB_FLTR2 0x110d4 #define A_MPS_PFVF_ATRB_FLTR1 0x110d8 +#define A_T6_MPS_PFVF_ATRB_FLTR3 0x110d8 #define A_MPS_PFVF_ATRB_FLTR2 0x110dc +#define A_T6_MPS_PFVF_ATRB_FLTR4 0x110dc #define A_MPS_PFVF_ATRB_FLTR3 0x110e0 +#define A_T6_MPS_PFVF_ATRB_FLTR5 0x110e0 #define A_MPS_PFVF_ATRB_FLTR4 0x110e4 +#define A_T6_MPS_PFVF_ATRB_FLTR6 0x110e4 
#define A_MPS_PFVF_ATRB_FLTR5 0x110e8 +#define A_T6_MPS_PFVF_ATRB_FLTR7 0x110e8 #define A_MPS_PFVF_ATRB_FLTR6 0x110ec +#define A_T6_MPS_PFVF_ATRB_FLTR8 0x110ec #define A_MPS_PFVF_ATRB_FLTR7 0x110f0 +#define A_T6_MPS_PFVF_ATRB_FLTR9 0x110f0 #define A_MPS_PFVF_ATRB_FLTR8 0x110f4 +#define A_T6_MPS_PFVF_ATRB_FLTR10 0x110f4 #define A_MPS_PFVF_ATRB_FLTR9 0x110f8 +#define A_T6_MPS_PFVF_ATRB_FLTR11 0x110f8 #define A_MPS_PFVF_ATRB_FLTR10 0x110fc +#define A_T6_MPS_PFVF_ATRB_FLTR12 0x110fc #define A_MPS_PFVF_ATRB_FLTR11 0x11100 +#define A_T6_MPS_PFVF_ATRB_FLTR13 0x11100 #define A_MPS_PFVF_ATRB_FLTR12 0x11104 +#define A_T6_MPS_PFVF_ATRB_FLTR14 0x11104 #define A_MPS_PFVF_ATRB_FLTR13 0x11108 +#define A_T6_MPS_PFVF_ATRB_FLTR15 0x11108 #define A_MPS_PFVF_ATRB_FLTR14 0x1110c +#define A_T6_MPS_RPLC_MAP_CTL 0x1110c #define A_MPS_PFVF_ATRB_FLTR15 0x11110 +#define A_T6_MPS_PF_RPLCT_MAP 0x11110 #define A_MPS_RPLC_MAP_CTL 0x11114 #define S_RPLC_MAP_ADDR 0 @@ -25018,6 +35238,7 @@ #define V_RPLC_MAP_ADDR(x) ((x) << S_RPLC_MAP_ADDR) #define G_RPLC_MAP_ADDR(x) (((x) >> S_RPLC_MAP_ADDR) & M_RPLC_MAP_ADDR) +#define A_T6_MPS_VF_RPLCT_MAP0 0x11114 #define A_MPS_PF_RPLCT_MAP 0x11118 #define S_PF_EN 0 @@ -25025,8 +35246,11 @@ #define V_PF_EN(x) ((x) << S_PF_EN) #define G_PF_EN(x) (((x) >> S_PF_EN) & M_PF_EN) +#define A_T6_MPS_VF_RPLCT_MAP1 0x11118 #define A_MPS_VF_RPLCT_MAP0 0x1111c +#define A_T6_MPS_VF_RPLCT_MAP2 0x1111c #define A_MPS_VF_RPLCT_MAP1 0x11120 +#define A_T6_MPS_VF_RPLCT_MAP3 0x11120 #define A_MPS_VF_RPLCT_MAP2 0x11124 #define A_MPS_VF_RPLCT_MAP3 0x11128 #define A_MPS_MEM_DBG_CTL 0x1112c @@ -25362,6 +35586,309 @@ #define V_MPS_RX_CGEN_MAC_IN(x) ((x) << S_MPS_RX_CGEN_MAC_IN) #define G_MPS_RX_CGEN_MAC_IN(x) (((x) >> S_MPS_RX_CGEN_MAC_IN) & M_MPS_RX_CGEN_MAC_IN) +#define A_MPS_RX_MAC_BG_PG_CNT0 0x11208 + +#define S_MAC_USED 16 +#define M_MAC_USED 0x7ffU +#define V_MAC_USED(x) ((x) << S_MAC_USED) +#define G_MAC_USED(x) (((x) >> S_MAC_USED) & M_MAC_USED) + +#define S_MAC_ALLOC 0 +#define 
M_MAC_ALLOC 0x7ffU +#define V_MAC_ALLOC(x) ((x) << S_MAC_ALLOC) +#define G_MAC_ALLOC(x) (((x) >> S_MAC_ALLOC) & M_MAC_ALLOC) + +#define A_MPS_RX_MAC_BG_PG_CNT1 0x1120c +#define A_MPS_RX_MAC_BG_PG_CNT2 0x11210 +#define A_MPS_RX_MAC_BG_PG_CNT3 0x11214 +#define A_MPS_RX_LPBK_BG_PG_CNT0 0x11218 + +#define S_LPBK_USED 16 +#define M_LPBK_USED 0x7ffU +#define V_LPBK_USED(x) ((x) << S_LPBK_USED) +#define G_LPBK_USED(x) (((x) >> S_LPBK_USED) & M_LPBK_USED) + +#define S_LPBK_ALLOC 0 +#define M_LPBK_ALLOC 0x7ffU +#define V_LPBK_ALLOC(x) ((x) << S_LPBK_ALLOC) +#define G_LPBK_ALLOC(x) (((x) >> S_LPBK_ALLOC) & M_LPBK_ALLOC) + +#define A_MPS_RX_LPBK_BG_PG_CNT1 0x1121c +#define A_MPS_RX_CONGESTION_THRESHOLD_BG0 0x11220 + +#define S_CONG_EN 31 +#define V_CONG_EN(x) ((x) << S_CONG_EN) +#define F_CONG_EN V_CONG_EN(1U) + +#define S_CONG_TH 0 +#define M_CONG_TH 0xfffffU +#define V_CONG_TH(x) ((x) << S_CONG_TH) +#define G_CONG_TH(x) (((x) >> S_CONG_TH) & M_CONG_TH) + +#define A_MPS_RX_CONGESTION_THRESHOLD_BG1 0x11224 +#define A_MPS_RX_CONGESTION_THRESHOLD_BG2 0x11228 +#define A_MPS_RX_CONGESTION_THRESHOLD_BG3 0x1122c +#define A_MPS_RX_GRE_PROT_TYPE 0x11230 + +#define S_NVGRE_EN 9 +#define V_NVGRE_EN(x) ((x) << S_NVGRE_EN) +#define F_NVGRE_EN V_NVGRE_EN(1U) + +#define S_GRE_EN 8 +#define V_GRE_EN(x) ((x) << S_GRE_EN) +#define F_GRE_EN V_GRE_EN(1U) + +#define S_GRE 0 +#define M_GRE 0xffU +#define V_GRE(x) ((x) << S_GRE) +#define G_GRE(x) (((x) >> S_GRE) & M_GRE) + +#define A_MPS_RX_VXLAN_TYPE 0x11234 + +#define S_VXLAN_EN 16 +#define V_VXLAN_EN(x) ((x) << S_VXLAN_EN) +#define F_VXLAN_EN V_VXLAN_EN(1U) + +#define S_VXLAN 0 +#define M_VXLAN 0xffffU +#define V_VXLAN(x) ((x) << S_VXLAN) +#define G_VXLAN(x) (((x) >> S_VXLAN) & M_VXLAN) + +#define A_MPS_RX_GENEVE_TYPE 0x11238 + +#define S_GENEVE_EN 16 +#define V_GENEVE_EN(x) ((x) << S_GENEVE_EN) +#define F_GENEVE_EN V_GENEVE_EN(1U) + +#define S_GENEVE 0 +#define M_GENEVE 0xffffU +#define V_GENEVE(x) ((x) << S_GENEVE) +#define G_GENEVE(x) (((x) 
>> S_GENEVE) & M_GENEVE) + +#define A_MPS_RX_INNER_HDR_IVLAN 0x1123c + +#define S_T6_IVLAN_EN 16 +#define V_T6_IVLAN_EN(x) ((x) << S_T6_IVLAN_EN) +#define F_T6_IVLAN_EN V_T6_IVLAN_EN(1U) + +#define A_MPS_RX_ENCAP_NVGRE 0x11240 + +#define S_ETYPE_EN 16 +#define V_ETYPE_EN(x) ((x) << S_ETYPE_EN) +#define F_ETYPE_EN V_ETYPE_EN(1U) + +#define S_T6_ETYPE 0 +#define M_T6_ETYPE 0xffffU +#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE) +#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE) + +#define A_MPS_RX_ENCAP_GENEVE 0x11244 + +#define S_T6_ETYPE 0 +#define M_T6_ETYPE 0xffffU +#define V_T6_ETYPE(x) ((x) << S_T6_ETYPE) +#define G_T6_ETYPE(x) (((x) >> S_T6_ETYPE) & M_T6_ETYPE) + +#define A_MPS_RX_TCP 0x11248 + +#define S_PROT_TYPE_EN 8 +#define V_PROT_TYPE_EN(x) ((x) << S_PROT_TYPE_EN) +#define F_PROT_TYPE_EN V_PROT_TYPE_EN(1U) + +#define S_PROT_TYPE 0 +#define M_PROT_TYPE 0xffU +#define V_PROT_TYPE(x) ((x) << S_PROT_TYPE) +#define G_PROT_TYPE(x) (((x) >> S_PROT_TYPE) & M_PROT_TYPE) + +#define A_MPS_RX_UDP 0x1124c +#define A_MPS_RX_PAUSE 0x11250 +#define A_MPS_RX_LENGTH 0x11254 + +#define S_SAP_VALUE 16 +#define M_SAP_VALUE 0xffffU +#define V_SAP_VALUE(x) ((x) << S_SAP_VALUE) +#define G_SAP_VALUE(x) (((x) >> S_SAP_VALUE) & M_SAP_VALUE) + +#define S_LENGTH_ETYPE 0 +#define M_LENGTH_ETYPE 0xffffU +#define V_LENGTH_ETYPE(x) ((x) << S_LENGTH_ETYPE) +#define G_LENGTH_ETYPE(x) (((x) >> S_LENGTH_ETYPE) & M_LENGTH_ETYPE) + +#define A_MPS_RX_CTL_ORG 0x11258 + +#define S_CTL_VALUE 24 +#define M_CTL_VALUE 0xffU +#define V_CTL_VALUE(x) ((x) << S_CTL_VALUE) +#define G_CTL_VALUE(x) (((x) >> S_CTL_VALUE) & M_CTL_VALUE) + +#define S_ORG_VALUE 0 +#define M_ORG_VALUE 0xffffffU +#define V_ORG_VALUE(x) ((x) << S_ORG_VALUE) +#define G_ORG_VALUE(x) (((x) >> S_ORG_VALUE) & M_ORG_VALUE) + +#define A_MPS_RX_IPV4 0x1125c + +#define S_ETYPE_IPV4 0 +#define M_ETYPE_IPV4 0xffffU +#define V_ETYPE_IPV4(x) ((x) << S_ETYPE_IPV4) +#define G_ETYPE_IPV4(x) (((x) >> S_ETYPE_IPV4) & M_ETYPE_IPV4) + +#define 
A_MPS_RX_IPV6 0x11260 + +#define S_ETYPE_IPV6 0 +#define M_ETYPE_IPV6 0xffffU +#define V_ETYPE_IPV6(x) ((x) << S_ETYPE_IPV6) +#define G_ETYPE_IPV6(x) (((x) >> S_ETYPE_IPV6) & M_ETYPE_IPV6) + +#define A_MPS_RX_TTL 0x11264 + +#define S_TTL_IPV4 10 +#define M_TTL_IPV4 0xffU +#define V_TTL_IPV4(x) ((x) << S_TTL_IPV4) +#define G_TTL_IPV4(x) (((x) >> S_TTL_IPV4) & M_TTL_IPV4) + +#define S_TTL_IPV6 2 +#define M_TTL_IPV6 0xffU +#define V_TTL_IPV6(x) ((x) << S_TTL_IPV6) +#define G_TTL_IPV6(x) (((x) >> S_TTL_IPV6) & M_TTL_IPV6) + +#define S_TTL_CHK_EN_IPV4 1 +#define V_TTL_CHK_EN_IPV4(x) ((x) << S_TTL_CHK_EN_IPV4) +#define F_TTL_CHK_EN_IPV4 V_TTL_CHK_EN_IPV4(1U) + +#define S_TTL_CHK_EN_IPV6 0 +#define V_TTL_CHK_EN_IPV6(x) ((x) << S_TTL_CHK_EN_IPV6) +#define F_TTL_CHK_EN_IPV6 V_TTL_CHK_EN_IPV6(1U) + +#define A_MPS_RX_DEFAULT_VNI 0x11268 + +#define S_VNI 0 +#define M_VNI 0xffffffU +#define V_VNI(x) ((x) << S_VNI) +#define G_VNI(x) (((x) >> S_VNI) & M_VNI) + +#define A_MPS_RX_PRS_CTL 0x1126c + +#define S_CTL_CHK_EN 28 +#define V_CTL_CHK_EN(x) ((x) << S_CTL_CHK_EN) +#define F_CTL_CHK_EN V_CTL_CHK_EN(1U) + +#define S_ORG_CHK_EN 27 +#define V_ORG_CHK_EN(x) ((x) << S_ORG_CHK_EN) +#define F_ORG_CHK_EN V_ORG_CHK_EN(1U) + +#define S_SAP_CHK_EN 26 +#define V_SAP_CHK_EN(x) ((x) << S_SAP_CHK_EN) +#define F_SAP_CHK_EN V_SAP_CHK_EN(1U) + +#define S_VXLAN_FLAG_CHK_EN 25 +#define V_VXLAN_FLAG_CHK_EN(x) ((x) << S_VXLAN_FLAG_CHK_EN) +#define F_VXLAN_FLAG_CHK_EN V_VXLAN_FLAG_CHK_EN(1U) + +#define S_VXLAN_FLAG_MASK 17 +#define M_VXLAN_FLAG_MASK 0xffU +#define V_VXLAN_FLAG_MASK(x) ((x) << S_VXLAN_FLAG_MASK) +#define G_VXLAN_FLAG_MASK(x) (((x) >> S_VXLAN_FLAG_MASK) & M_VXLAN_FLAG_MASK) + +#define S_VXLAN_FLAG 9 +#define M_VXLAN_FLAG 0xffU +#define V_VXLAN_FLAG(x) ((x) << S_VXLAN_FLAG) +#define G_VXLAN_FLAG(x) (((x) >> S_VXLAN_FLAG) & M_VXLAN_FLAG) + +#define S_GRE_VER_CHK_EN 8 +#define V_GRE_VER_CHK_EN(x) ((x) << S_GRE_VER_CHK_EN) +#define F_GRE_VER_CHK_EN V_GRE_VER_CHK_EN(1U) + +#define S_GRE_VER 
5 +#define M_GRE_VER 0x7U +#define V_GRE_VER(x) ((x) << S_GRE_VER) +#define G_GRE_VER(x) (((x) >> S_GRE_VER) & M_GRE_VER) + +#define S_GENEVE_VER_CHK_EN 4 +#define V_GENEVE_VER_CHK_EN(x) ((x) << S_GENEVE_VER_CHK_EN) +#define F_GENEVE_VER_CHK_EN V_GENEVE_VER_CHK_EN(1U) + +#define S_GENEVE_VER 2 +#define M_GENEVE_VER 0x3U +#define V_GENEVE_VER(x) ((x) << S_GENEVE_VER) +#define G_GENEVE_VER(x) (((x) >> S_GENEVE_VER) & M_GENEVE_VER) + +#define S_DIP_EN 1 +#define V_DIP_EN(x) ((x) << S_DIP_EN) +#define F_DIP_EN V_DIP_EN(1U) + +#define A_MPS_RX_PRS_CTL_2 0x11270 + +#define S_EN_UDP_CSUM_CHK 4 +#define V_EN_UDP_CSUM_CHK(x) ((x) << S_EN_UDP_CSUM_CHK) +#define F_EN_UDP_CSUM_CHK V_EN_UDP_CSUM_CHK(1U) + +#define S_EN_UDP_LEN_CHK 3 +#define V_EN_UDP_LEN_CHK(x) ((x) << S_EN_UDP_LEN_CHK) +#define F_EN_UDP_LEN_CHK V_EN_UDP_LEN_CHK(1U) + +#define S_EN_IP_CSUM_CHK 2 +#define V_EN_IP_CSUM_CHK(x) ((x) << S_EN_IP_CSUM_CHK) +#define F_EN_IP_CSUM_CHK V_EN_IP_CSUM_CHK(1U) + +#define S_EN_IP_PAYLOAD_LEN_CHK 1 +#define V_EN_IP_PAYLOAD_LEN_CHK(x) ((x) << S_EN_IP_PAYLOAD_LEN_CHK) +#define F_EN_IP_PAYLOAD_LEN_CHK V_EN_IP_PAYLOAD_LEN_CHK(1U) + +#define S_T6_IPV6_UDP_CSUM_COMPAT 0 +#define V_T6_IPV6_UDP_CSUM_COMPAT(x) ((x) << S_T6_IPV6_UDP_CSUM_COMPAT) +#define F_T6_IPV6_UDP_CSUM_COMPAT V_T6_IPV6_UDP_CSUM_COMPAT(1U) + +#define A_MPS_RX_MPS2NCSI_CNT 0x11274 +#define A_MPS_RX_MAX_TNL_HDR_LEN 0x11278 + +#define S_T6_LEN 0 +#define M_T6_LEN 0x1ffU +#define V_T6_LEN(x) ((x) << S_T6_LEN) +#define G_T6_LEN(x) (((x) >> S_T6_LEN) & M_T6_LEN) + +#define A_MPS_RX_PAUSE_DA_H 0x1127c +#define A_MPS_RX_PAUSE_DA_L 0x11280 +#define A_MPS_RX_CNT_NVGRE_PKT_MAC0 0x11284 +#define A_MPS_RX_CNT_VXLAN_PKT_MAC0 0x11288 +#define A_MPS_RX_CNT_GENEVE_PKT_MAC0 0x1128c +#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC0 0x11290 +#define A_MPS_RX_CNT_NVGRE_PKT_MAC1 0x11294 +#define A_MPS_RX_CNT_VXLAN_PKT_MAC1 0x11298 +#define A_MPS_RX_CNT_GENEVE_PKT_MAC1 0x1129c +#define A_MPS_RX_CNT_TNL_ERR_PKT_MAC1 0x112a0 +#define 
A_MPS_RX_CNT_NVGRE_PKT_LPBK0 0x112a4 +#define A_MPS_RX_CNT_VXLAN_PKT_LPBK0 0x112a8 +#define A_MPS_RX_CNT_GENEVE_PKT_LPBK0 0x112ac +#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK0 0x112b0 +#define A_MPS_RX_CNT_NVGRE_PKT_LPBK1 0x112b4 +#define A_MPS_RX_CNT_VXLAN_PKT_LPBK1 0x112b8 +#define A_MPS_RX_CNT_GENEVE_PKT_LPBK1 0x112bc +#define A_MPS_RX_CNT_TNL_ERR_PKT_LPBK1 0x112c0 +#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP0 0x112c4 +#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP0 0x112c8 +#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP0 0x112cc +#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP0 0x112d0 +#define A_MPS_RX_CNT_NVGRE_PKT_TO_TP1 0x112d4 +#define A_MPS_RX_CNT_VXLAN_PKT_TO_TP1 0x112d8 +#define A_MPS_RX_CNT_GENEVE_PKT_TO_TP1 0x112dc +#define A_MPS_RX_CNT_TNL_ERR_PKT_TO_TP1 0x112e0 +#define A_MPS_VF_RPLCT_MAP4 0x11300 +#define A_MPS_VF_RPLCT_MAP5 0x11304 +#define A_MPS_VF_RPLCT_MAP6 0x11308 +#define A_MPS_VF_RPLCT_MAP7 0x1130c +#define A_MPS_CLS_DIPIPV4_ID_TABLE 0x12000 +#define A_MPS_CLS_DIPIPV4_MASK_TABLE 0x12004 +#define A_MPS_CLS_DIPIPV6ID_0_TABLE 0x12020 +#define A_MPS_CLS_DIPIPV6ID_1_TABLE 0x12024 +#define A_MPS_CLS_DIPIPV6ID_2_TABLE 0x12028 +#define A_MPS_CLS_DIPIPV6ID_3_TABLE 0x1202c +#define A_MPS_CLS_DIPIPV6MASK_0_TABLE 0x12030 +#define A_MPS_CLS_DIPIPV6MASK_1_TABLE 0x12034 +#define A_MPS_CLS_DIPIPV6MASK_2_TABLE 0x12038 +#define A_MPS_CLS_DIPIPV6MASK_3_TABLE 0x1203c +#define A_MPS_RX_HASH_LKP_TABLE 0x12060 + /* registers for module CPL_SWITCH */ #define CPL_SWITCH_BASE_ADDR 0x19040 @@ -26232,6 +36759,10 @@ #define V_PCIE_PART_CGEN(x) ((x) << S_PCIE_PART_CGEN) #define F_PCIE_PART_CGEN V_PCIE_PART_CGEN(1U) +#define S_PL_DIS_PRTY_CHK 20 +#define V_PL_DIS_PRTY_CHK(x) ((x) << S_PL_DIS_PRTY_CHK) +#define F_PL_DIS_PRTY_CHK V_PL_DIS_PRTY_CHK(1U) + #define A_PMU_SLEEPMODE_WAKEUP 0x19124 #define S_HWWAKEUPEN 5 @@ -26720,6 +37251,14 @@ #define V_PERR_RSVD1(x) ((x) << S_PERR_RSVD1) #define F_PERR_RSVD1 V_PERR_RSVD1(1U) +#define S_PERR_ENABLE_CTX_1 24 +#define V_PERR_ENABLE_CTX_1(x) ((x) << S_PERR_ENABLE_CTX_1) 
+#define F_PERR_ENABLE_CTX_1 V_PERR_ENABLE_CTX_1(1U) + +#define S_PERR_ENABLE_CTX_0 23 +#define V_PERR_ENABLE_CTX_0(x) ((x) << S_PERR_ENABLE_CTX_0) +#define F_PERR_ENABLE_CTX_0 V_PERR_ENABLE_CTX_0(1U) + #define A_ULP_RX_PERR_INJECT 0x191a0 #define A_ULP_RX_RQUDP_LLIMIT 0x191a4 #define A_ULP_RX_RQUDP_ULIMIT 0x191a8 @@ -26966,6 +37505,78 @@ #define V_SDC_CRC_PROT_EN(x) ((x) << S_SDC_CRC_PROT_EN) #define F_SDC_CRC_PROT_EN V_SDC_CRC_PROT_EN(1U) +#define S_ISCSI_DCRC_ERROR_CMP_EN 25 +#define V_ISCSI_DCRC_ERROR_CMP_EN(x) ((x) << S_ISCSI_DCRC_ERROR_CMP_EN) +#define F_ISCSI_DCRC_ERROR_CMP_EN V_ISCSI_DCRC_ERROR_CMP_EN(1U) + +#define S_ISCSITAGPI 24 +#define V_ISCSITAGPI(x) ((x) << S_ISCSITAGPI) +#define F_ISCSITAGPI V_ISCSITAGPI(1U) + +#define S_DDP_VERSION_1 22 +#define M_DDP_VERSION_1 0x3U +#define V_DDP_VERSION_1(x) ((x) << S_DDP_VERSION_1) +#define G_DDP_VERSION_1(x) (((x) >> S_DDP_VERSION_1) & M_DDP_VERSION_1) + +#define S_DDP_VERSION_0 20 +#define M_DDP_VERSION_0 0x3U +#define V_DDP_VERSION_0(x) ((x) << S_DDP_VERSION_0) +#define G_DDP_VERSION_0(x) (((x) >> S_DDP_VERSION_0) & M_DDP_VERSION_0) + +#define S_RDMA_VERSION_1 18 +#define M_RDMA_VERSION_1 0x3U +#define V_RDMA_VERSION_1(x) ((x) << S_RDMA_VERSION_1) +#define G_RDMA_VERSION_1(x) (((x) >> S_RDMA_VERSION_1) & M_RDMA_VERSION_1) + +#define S_RDMA_VERSION_0 16 +#define M_RDMA_VERSION_0 0x3U +#define V_RDMA_VERSION_0(x) ((x) << S_RDMA_VERSION_0) +#define G_RDMA_VERSION_0(x) (((x) >> S_RDMA_VERSION_0) & M_RDMA_VERSION_0) + +#define S_PBL_BOUND_CHECK_W_PGLEN 15 +#define V_PBL_BOUND_CHECK_W_PGLEN(x) ((x) << S_PBL_BOUND_CHECK_W_PGLEN) +#define F_PBL_BOUND_CHECK_W_PGLEN V_PBL_BOUND_CHECK_W_PGLEN(1U) + +#define S_ZBYTE_FIX_DISABLE 14 +#define V_ZBYTE_FIX_DISABLE(x) ((x) << S_ZBYTE_FIX_DISABLE) +#define F_ZBYTE_FIX_DISABLE V_ZBYTE_FIX_DISABLE(1U) + +#define S_T10_OFFSET_UPDATE_EN 13 +#define V_T10_OFFSET_UPDATE_EN(x) ((x) << S_T10_OFFSET_UPDATE_EN) +#define F_T10_OFFSET_UPDATE_EN V_T10_OFFSET_UPDATE_EN(1U) + +#define 
S_ULP_INSERT_PI 12 +#define V_ULP_INSERT_PI(x) ((x) << S_ULP_INSERT_PI) +#define F_ULP_INSERT_PI V_ULP_INSERT_PI(1U) + +#define S_PDU_DPI 11 +#define V_PDU_DPI(x) ((x) << S_PDU_DPI) +#define F_PDU_DPI V_PDU_DPI(1U) + +#define S_ISCSI_EFF_OFFSET_EN 10 +#define V_ISCSI_EFF_OFFSET_EN(x) ((x) << S_ISCSI_EFF_OFFSET_EN) +#define F_ISCSI_EFF_OFFSET_EN V_ISCSI_EFF_OFFSET_EN(1U) + +#define S_ISCSI_ALL_CMP_MODE 9 +#define V_ISCSI_ALL_CMP_MODE(x) ((x) << S_ISCSI_ALL_CMP_MODE) +#define F_ISCSI_ALL_CMP_MODE V_ISCSI_ALL_CMP_MODE(1U) + +#define S_ISCSI_ENABLE_HDR_CMD 8 +#define V_ISCSI_ENABLE_HDR_CMD(x) ((x) << S_ISCSI_ENABLE_HDR_CMD) +#define F_ISCSI_ENABLE_HDR_CMD V_ISCSI_ENABLE_HDR_CMD(1U) + +#define S_ISCSI_FORCE_CMP_MODE 7 +#define V_ISCSI_FORCE_CMP_MODE(x) ((x) << S_ISCSI_FORCE_CMP_MODE) +#define F_ISCSI_FORCE_CMP_MODE V_ISCSI_FORCE_CMP_MODE(1U) + +#define S_ISCSI_ENABLE_CMP_MODE 6 +#define V_ISCSI_ENABLE_CMP_MODE(x) ((x) << S_ISCSI_ENABLE_CMP_MODE) +#define F_ISCSI_ENABLE_CMP_MODE V_ISCSI_ENABLE_CMP_MODE(1U) + +#define S_PIO_RDMA_SEND_RQE 5 +#define V_PIO_RDMA_SEND_RQE(x) ((x) << S_PIO_RDMA_SEND_RQE) +#define F_PIO_RDMA_SEND_RQE V_PIO_RDMA_SEND_RQE(1U) + #define A_ULP_RX_CH0_CGEN 0x19260 #define S_BYPASS_CGEN 7 @@ -27143,6 +37754,44 @@ #define V_SEND_MSN_CHECK_ENABLE(x) ((x) << S_SEND_MSN_CHECK_ENABLE) #define F_SEND_MSN_CHECK_ENABLE V_SEND_MSN_CHECK_ENABLE(1U) +#define A_ULP_RX_TLS_PP_LLIMIT 0x192a4 + +#define S_TLSPPLLIMIT 6 +#define M_TLSPPLLIMIT 0x3ffffffU +#define V_TLSPPLLIMIT(x) ((x) << S_TLSPPLLIMIT) +#define G_TLSPPLLIMIT(x) (((x) >> S_TLSPPLLIMIT) & M_TLSPPLLIMIT) + +#define A_ULP_RX_TLS_PP_ULIMIT 0x192a8 + +#define S_TLSPPULIMIT 6 +#define M_TLSPPULIMIT 0x3ffffffU +#define V_TLSPPULIMIT(x) ((x) << S_TLSPPULIMIT) +#define G_TLSPPULIMIT(x) (((x) >> S_TLSPPULIMIT) & M_TLSPPULIMIT) + +#define A_ULP_RX_TLS_KEY_LLIMIT 0x192ac + +#define S_TLSKEYLLIMIT 8 +#define M_TLSKEYLLIMIT 0xffffffU +#define V_TLSKEYLLIMIT(x) ((x) << S_TLSKEYLLIMIT) +#define G_TLSKEYLLIMIT(x) 
(((x) >> S_TLSKEYLLIMIT) & M_TLSKEYLLIMIT) + +#define A_ULP_RX_TLS_KEY_ULIMIT 0x192b0 + +#define S_TLSKEYULIMIT 8 +#define M_TLSKEYULIMIT 0xffffffU +#define V_TLSKEYULIMIT(x) ((x) << S_TLSKEYULIMIT) +#define G_TLSKEYULIMIT(x) (((x) >> S_TLSKEYULIMIT) & M_TLSKEYULIMIT) + +#define A_ULP_RX_TLS_CTL 0x192bc +#define A_ULP_RX_TLS_IND_CMD 0x19348 + +#define S_TLS_RX_REG_OFF_ADDR 0 +#define M_TLS_RX_REG_OFF_ADDR 0x3ffU +#define V_TLS_RX_REG_OFF_ADDR(x) ((x) << S_TLS_RX_REG_OFF_ADDR) +#define G_TLS_RX_REG_OFF_ADDR(x) (((x) >> S_TLS_RX_REG_OFF_ADDR) & M_TLS_RX_REG_OFF_ADDR) + +#define A_ULP_RX_TLS_IND_DATA 0x1934c + /* registers for module SF */ #define SF_BASE_ADDR 0x193f8 @@ -27191,6 +37840,20 @@ #define V_VFID(x) ((x) << S_VFID) #define G_VFID(x) (((x) >> S_VFID) & M_VFID) +#define S_T6_SOURCEPF 9 +#define M_T6_SOURCEPF 0x7U +#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF) +#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF) + +#define S_T6_ISVF 8 +#define V_T6_ISVF(x) ((x) << S_T6_ISVF) +#define F_T6_ISVF V_T6_ISVF(1U) + +#define S_T6_VFID 0 +#define M_T6_VFID 0xffU +#define V_T6_VFID(x) ((x) << S_T6_VFID) +#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID) + #define A_PL_VF_REV 0x4 #define S_CHIPID 4 @@ -27225,6 +37888,21 @@ #define F_SWINT V_SWINT(1U) #define A_PL_WHOAMI 0x19400 + +#define S_T6_SOURCEPF 9 +#define M_T6_SOURCEPF 0x7U +#define V_T6_SOURCEPF(x) ((x) << S_T6_SOURCEPF) +#define G_T6_SOURCEPF(x) (((x) >> S_T6_SOURCEPF) & M_T6_SOURCEPF) + +#define S_T6_ISVF 8 +#define V_T6_ISVF(x) ((x) << S_T6_ISVF) +#define F_T6_ISVF V_T6_ISVF(1U) + +#define S_T6_VFID 0 +#define M_T6_VFID 0xffU +#define V_T6_VFID(x) ((x) << S_T6_VFID) +#define G_T6_VFID(x) (((x) >> S_T6_VFID) & M_T6_VFID) + #define A_PL_PERR_CAUSE 0x19404 #define S_UART 28 @@ -27542,16 +38220,71 @@ #define V_LN0_AECMD(x) ((x) << S_LN0_AECMD) #define G_LN0_AECMD(x) (((x) >> S_LN0_AECMD) & M_LN0_AECMD) +#define S_T5_STATECFGINITF 16 +#define M_T5_STATECFGINITF 0x7fU +#define 
V_T5_STATECFGINITF(x) ((x) << S_T5_STATECFGINITF) +#define G_T5_STATECFGINITF(x) (((x) >> S_T5_STATECFGINITF) & M_T5_STATECFGINITF) + +#define S_T5_STATECFGINIT 12 +#define M_T5_STATECFGINIT 0xfU +#define V_T5_STATECFGINIT(x) ((x) << S_T5_STATECFGINIT) +#define G_T5_STATECFGINIT(x) (((x) >> S_T5_STATECFGINIT) & M_T5_STATECFGINIT) + #define S_PCIE_SPEED 8 #define M_PCIE_SPEED 0x3U #define V_PCIE_SPEED(x) ((x) << S_PCIE_SPEED) #define G_PCIE_SPEED(x) (((x) >> S_PCIE_SPEED) & M_PCIE_SPEED) +#define S_T5_PERSTTIMEOUT 7 +#define V_T5_PERSTTIMEOUT(x) ((x) << S_T5_PERSTTIMEOUT) +#define F_T5_PERSTTIMEOUT V_T5_PERSTTIMEOUT(1U) + +#define S_T5_LTSSMENABLE 6 +#define V_T5_LTSSMENABLE(x) ((x) << S_T5_LTSSMENABLE) +#define F_T5_LTSSMENABLE V_T5_LTSSMENABLE(1U) + #define S_LTSSM 0 #define M_LTSSM 0x3fU #define V_LTSSM(x) ((x) << S_LTSSM) #define G_LTSSM(x) (((x) >> S_LTSSM) & M_LTSSM) +#define S_T6_LN0_AESTAT 27 +#define M_T6_LN0_AESTAT 0x7U +#define V_T6_LN0_AESTAT(x) ((x) << S_T6_LN0_AESTAT) +#define G_T6_LN0_AESTAT(x) (((x) >> S_T6_LN0_AESTAT) & M_T6_LN0_AESTAT) + +#define S_T6_LN0_AECMD 24 +#define M_T6_LN0_AECMD 0x7U +#define V_T6_LN0_AECMD(x) ((x) << S_T6_LN0_AECMD) +#define G_T6_LN0_AECMD(x) (((x) >> S_T6_LN0_AECMD) & M_T6_LN0_AECMD) + +#define S_T6_STATECFGINITF 16 +#define M_T6_STATECFGINITF 0xffU +#define V_T6_STATECFGINITF(x) ((x) << S_T6_STATECFGINITF) +#define G_T6_STATECFGINITF(x) (((x) >> S_T6_STATECFGINITF) & M_T6_STATECFGINITF) + +#define S_T6_STATECFGINIT 12 +#define M_T6_STATECFGINIT 0xfU +#define V_T6_STATECFGINIT(x) ((x) << S_T6_STATECFGINIT) +#define G_T6_STATECFGINIT(x) (((x) >> S_T6_STATECFGINIT) & M_T6_STATECFGINIT) + +#define S_PHY_STATUS 10 +#define V_PHY_STATUS(x) ((x) << S_PHY_STATUS) +#define F_PHY_STATUS V_PHY_STATUS(1U) + +#define S_SPEED_PL 8 +#define M_SPEED_PL 0x3U +#define V_SPEED_PL(x) ((x) << S_SPEED_PL) +#define G_SPEED_PL(x) (((x) >> S_SPEED_PL) & M_SPEED_PL) + +#define S_PERSTTIMEOUT_PL 7 +#define V_PERSTTIMEOUT_PL(x) ((x) << 
S_PERSTTIMEOUT_PL) +#define F_PERSTTIMEOUT_PL V_PERSTTIMEOUT_PL(1U) + +#define S_T6_LTSSMENABLE 6 +#define V_T6_LTSSMENABLE(x) ((x) << S_T6_LTSSMENABLE) +#define F_T6_LTSSMENABLE V_T6_LTSSMENABLE(1U) + #define A_PL_PCIE_CTL_STAT 0x19444 #define S_PCIE_STATUS 16 @@ -27733,6 +38466,11 @@ #define V_PL_TOVFID(x) ((x) << S_PL_TOVFID) #define G_PL_TOVFID(x) (((x) >> S_PL_TOVFID) & M_PL_TOVFID) +#define S_T6_PL_TOVFID 0 +#define M_T6_PL_TOVFID 0x1ffU +#define V_T6_PL_TOVFID(x) ((x) << S_T6_PL_TOVFID) +#define G_T6_PL_TOVFID(x) (((x) >> S_T6_PL_TOVFID) & M_T6_PL_TOVFID) + #define A_PL_VFID_MAP 0x19800 #define S_VFID_VLD 7 @@ -27743,6 +38481,7 @@ #define LE_BASE_ADDR 0x19c00 #define A_LE_BUF_CONFIG 0x19c00 +#define A_LE_DB_ID 0x19c00 #define A_LE_DB_CONFIG 0x19c04 #define S_TCAMCMDOVLAPEN 21 @@ -27850,6 +38589,51 @@ #define V_SINGLETHREAD(x) ((x) << S_SINGLETHREAD) #define F_SINGLETHREAD V_SINGLETHREAD(1U) +#define S_CHK_FUL_TUP_ZERO 27 +#define V_CHK_FUL_TUP_ZERO(x) ((x) << S_CHK_FUL_TUP_ZERO) +#define F_CHK_FUL_TUP_ZERO V_CHK_FUL_TUP_ZERO(1U) + +#define S_PRI_HASH 26 +#define V_PRI_HASH(x) ((x) << S_PRI_HASH) +#define F_PRI_HASH V_PRI_HASH(1U) + +#define S_EXTN_HASH_IPV4 25 +#define V_EXTN_HASH_IPV4(x) ((x) << S_EXTN_HASH_IPV4) +#define F_EXTN_HASH_IPV4 V_EXTN_HASH_IPV4(1U) + +#define S_ASLIPCOMPEN_IPV4 18 +#define V_ASLIPCOMPEN_IPV4(x) ((x) << S_ASLIPCOMPEN_IPV4) +#define F_ASLIPCOMPEN_IPV4 V_ASLIPCOMPEN_IPV4(1U) + +#define S_IGNR_TUP_ZERO 9 +#define V_IGNR_TUP_ZERO(x) ((x) << S_IGNR_TUP_ZERO) +#define F_IGNR_TUP_ZERO V_IGNR_TUP_ZERO(1U) + +#define S_IGNR_LIP_ZERO 8 +#define V_IGNR_LIP_ZERO(x) ((x) << S_IGNR_LIP_ZERO) +#define F_IGNR_LIP_ZERO V_IGNR_LIP_ZERO(1U) + +#define S_CLCAM_INIT_BUSY 7 +#define V_CLCAM_INIT_BUSY(x) ((x) << S_CLCAM_INIT_BUSY) +#define F_CLCAM_INIT_BUSY V_CLCAM_INIT_BUSY(1U) + +#define S_CLCAM_INIT 6 +#define V_CLCAM_INIT(x) ((x) << S_CLCAM_INIT) +#define F_CLCAM_INIT V_CLCAM_INIT(1U) + +#define S_MTCAM_INIT_BUSY 5 +#define V_MTCAM_INIT_BUSY(x) ((x) 
<< S_MTCAM_INIT_BUSY) +#define F_MTCAM_INIT_BUSY V_MTCAM_INIT_BUSY(1U) + +#define S_MTCAM_INIT 4 +#define V_MTCAM_INIT(x) ((x) << S_MTCAM_INIT) +#define F_MTCAM_INIT V_MTCAM_INIT(1U) + +#define S_REGION_EN 0 +#define M_REGION_EN 0xfU +#define V_REGION_EN(x) ((x) << S_REGION_EN) +#define G_REGION_EN(x) (((x) >> S_REGION_EN) & M_REGION_EN) + #define A_LE_MISC 0x19c08 #define S_CMPUNVAIL 0 @@ -27889,6 +38673,43 @@ #define V_HASHCLKOFF(x) ((x) << S_HASHCLKOFF) #define F_HASHCLKOFF V_HASHCLKOFF(1U) +#define A_LE_DB_EXEC_CTRL 0x19c08 + +#define S_TPDB_IF_PAUSE_ACK 10 +#define V_TPDB_IF_PAUSE_ACK(x) ((x) << S_TPDB_IF_PAUSE_ACK) +#define F_TPDB_IF_PAUSE_ACK V_TPDB_IF_PAUSE_ACK(1U) + +#define S_TPDB_IF_PAUSE_REQ 9 +#define V_TPDB_IF_PAUSE_REQ(x) ((x) << S_TPDB_IF_PAUSE_REQ) +#define F_TPDB_IF_PAUSE_REQ V_TPDB_IF_PAUSE_REQ(1U) + +#define S_ERRSTOP_EN 8 +#define V_ERRSTOP_EN(x) ((x) << S_ERRSTOP_EN) +#define F_ERRSTOP_EN V_ERRSTOP_EN(1U) + +#define S_CMDLIMIT 0 +#define M_CMDLIMIT 0xffU +#define V_CMDLIMIT(x) ((x) << S_CMDLIMIT) +#define G_CMDLIMIT(x) (((x) >> S_CMDLIMIT) & M_CMDLIMIT) + +#define A_LE_DB_PS_CTRL 0x19c0c + +#define S_CLTCAMDEEPSLEEP_STAT 10 +#define V_CLTCAMDEEPSLEEP_STAT(x) ((x) << S_CLTCAMDEEPSLEEP_STAT) +#define F_CLTCAMDEEPSLEEP_STAT V_CLTCAMDEEPSLEEP_STAT(1U) + +#define S_TCAMDEEPSLEEP_STAT 9 +#define V_TCAMDEEPSLEEP_STAT(x) ((x) << S_TCAMDEEPSLEEP_STAT) +#define F_TCAMDEEPSLEEP_STAT V_TCAMDEEPSLEEP_STAT(1U) + +#define S_CLTCAMDEEPSLEEP 7 +#define V_CLTCAMDEEPSLEEP(x) ((x) << S_CLTCAMDEEPSLEEP) +#define F_CLTCAMDEEPSLEEP V_CLTCAMDEEPSLEEP(1U) + +#define S_TCAMDEEPSLEEP 6 +#define V_TCAMDEEPSLEEP(x) ((x) << S_TCAMDEEPSLEEP) +#define F_TCAMDEEPSLEEP V_TCAMDEEPSLEEP(1U) + #define A_LE_DB_ROUTING_TABLE_INDEX 0x19c10 #define S_RTINDX 7 @@ -27896,6 +38717,13 @@ #define V_RTINDX(x) ((x) << S_RTINDX) #define G_RTINDX(x) (((x) >> S_RTINDX) & M_RTINDX) +#define A_LE_DB_ACTIVE_TABLE_START_INDEX 0x19c10 + +#define S_ATINDX 0 +#define M_ATINDX 0xfffffU +#define 
V_ATINDX(x) ((x) << S_ATINDX) +#define G_ATINDX(x) (((x) >> S_ATINDX) & M_ATINDX) + #define A_LE_DB_FILTER_TABLE_INDEX 0x19c14 #define S_FTINDX 7 @@ -27903,6 +38731,13 @@ #define V_FTINDX(x) ((x) << S_FTINDX) #define G_FTINDX(x) (((x) >> S_FTINDX) & M_FTINDX) +#define A_LE_DB_NORM_FILT_TABLE_START_INDEX 0x19c14 + +#define S_NFTINDX 0 +#define M_NFTINDX 0xfffffU +#define V_NFTINDX(x) ((x) << S_NFTINDX) +#define G_NFTINDX(x) (((x) >> S_NFTINDX) & M_NFTINDX) + #define A_LE_DB_SERVER_INDEX 0x19c18 #define S_SRINDX 7 @@ -27910,6 +38745,13 @@ #define V_SRINDX(x) ((x) << S_SRINDX) #define G_SRINDX(x) (((x) >> S_SRINDX) & M_SRINDX) +#define A_LE_DB_SRVR_START_INDEX 0x19c18 + +#define S_T6_SRINDX 0 +#define M_T6_SRINDX 0xfffffU +#define V_T6_SRINDX(x) ((x) << S_T6_SRINDX) +#define G_T6_SRINDX(x) (((x) >> S_T6_SRINDX) & M_T6_SRINDX) + #define A_LE_DB_CLIP_TABLE_INDEX 0x19c1c #define S_CLIPTINDX 7 @@ -27917,6 +38759,13 @@ #define V_CLIPTINDX(x) ((x) << S_CLIPTINDX) #define G_CLIPTINDX(x) (((x) >> S_CLIPTINDX) & M_CLIPTINDX) +#define A_LE_DB_HPRI_FILT_TABLE_START_INDEX 0x19c1c + +#define S_HFTINDX 0 +#define M_HFTINDX 0xfffffU +#define V_HFTINDX(x) ((x) << S_HFTINDX) +#define G_HFTINDX(x) (((x) >> S_HFTINDX) & M_HFTINDX) + #define A_LE_DB_ACT_CNT_IPV4 0x19c20 #define S_ACTCNTIPV4 0 @@ -27943,9 +38792,40 @@ #define V_HASHSIZE(x) ((x) << S_HASHSIZE) #define G_HASHSIZE(x) (((x) >> S_HASHSIZE) & M_HASHSIZE) +#define S_NUMHASHBKT 20 +#define M_NUMHASHBKT 0x1fU +#define V_NUMHASHBKT(x) ((x) << S_NUMHASHBKT) +#define G_NUMHASHBKT(x) (((x) >> S_NUMHASHBKT) & M_NUMHASHBKT) + +#define S_HASHTBLSIZE 3 +#define M_HASHTBLSIZE 0x1ffffU +#define V_HASHTBLSIZE(x) ((x) << S_HASHTBLSIZE) +#define G_HASHTBLSIZE(x) (((x) >> S_HASHTBLSIZE) & M_HASHTBLSIZE) + #define A_LE_DB_HASH_TABLE_BASE 0x19c2c +#define A_LE_DB_MIN_NUM_ACTV_TCAM_ENTRIES 0x19c2c + +#define S_MIN_ATCAM_ENTS 0 +#define M_MIN_ATCAM_ENTS 0xfffffU +#define V_MIN_ATCAM_ENTS(x) ((x) << S_MIN_ATCAM_ENTS) +#define G_MIN_ATCAM_ENTS(x) 
(((x) >> S_MIN_ATCAM_ENTS) & M_MIN_ATCAM_ENTS) + #define A_LE_DB_HASH_TID_BASE 0x19c30 +#define A_LE_DB_HASH_TBL_BASE_ADDR 0x19c30 + +#define S_HASHTBLADDR 4 +#define M_HASHTBLADDR 0xfffffffU +#define V_HASHTBLADDR(x) ((x) << S_HASHTBLADDR) +#define G_HASHTBLADDR(x) (((x) >> S_HASHTBLADDR) & M_HASHTBLADDR) + #define A_LE_DB_SIZE 0x19c34 +#define A_LE_TCAM_SIZE 0x19c34 + +#define S_TCAM_SIZE 0 +#define M_TCAM_SIZE 0x3U +#define V_TCAM_SIZE(x) ((x) << S_TCAM_SIZE) +#define G_TCAM_SIZE(x) (((x) >> S_TCAM_SIZE) & M_TCAM_SIZE) + #define A_LE_DB_INT_ENABLE 0x19c38 #define S_MSGSEL 27 @@ -28041,7 +38921,160 @@ #define V_VFPARERR(x) ((x) << S_VFPARERR) #define F_VFPARERR V_VFPARERR(1U) +#define S_CLIPSUBERR 29 +#define V_CLIPSUBERR(x) ((x) << S_CLIPSUBERR) +#define F_CLIPSUBERR V_CLIPSUBERR(1U) + +#define S_CLCAMFIFOERR 28 +#define V_CLCAMFIFOERR(x) ((x) << S_CLCAMFIFOERR) +#define F_CLCAMFIFOERR V_CLCAMFIFOERR(1U) + +#define S_HASHTBLMEMCRCERR 27 +#define V_HASHTBLMEMCRCERR(x) ((x) << S_HASHTBLMEMCRCERR) +#define F_HASHTBLMEMCRCERR V_HASHTBLMEMCRCERR(1U) + +#define S_CTCAMINVLDENT 26 +#define V_CTCAMINVLDENT(x) ((x) << S_CTCAMINVLDENT) +#define F_CTCAMINVLDENT V_CTCAMINVLDENT(1U) + +#define S_TCAMINVLDENT 25 +#define V_TCAMINVLDENT(x) ((x) << S_TCAMINVLDENT) +#define F_TCAMINVLDENT V_TCAMINVLDENT(1U) + +#define S_TOTCNTERR 24 +#define V_TOTCNTERR(x) ((x) << S_TOTCNTERR) +#define F_TOTCNTERR V_TOTCNTERR(1U) + +#define S_CMDPRSRINTERR 23 +#define V_CMDPRSRINTERR(x) ((x) << S_CMDPRSRINTERR) +#define F_CMDPRSRINTERR V_CMDPRSRINTERR(1U) + +#define S_CMDTIDERR 22 +#define V_CMDTIDERR(x) ((x) << S_CMDTIDERR) +#define F_CMDTIDERR V_CMDTIDERR(1U) + +#define S_T6_ACTRGNFULL 21 +#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL) +#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U) + +#define S_T6_ACTCNTIPV6TZERO 20 +#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO) +#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U) + +#define S_T6_ACTCNTIPV4TZERO 19 +#define 
V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO) +#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U) + +#define S_T6_ACTCNTIPV6ZERO 18 +#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO) +#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U) + +#define S_T6_ACTCNTIPV4ZERO 17 +#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO) +#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U) + +#define S_MAIFWRINTPERR 16 +#define V_MAIFWRINTPERR(x) ((x) << S_MAIFWRINTPERR) +#define F_MAIFWRINTPERR V_MAIFWRINTPERR(1U) + +#define S_HASHTBLMEMACCERR 15 +#define V_HASHTBLMEMACCERR(x) ((x) << S_HASHTBLMEMACCERR) +#define F_HASHTBLMEMACCERR V_HASHTBLMEMACCERR(1U) + +#define S_TCAMCRCERR 14 +#define V_TCAMCRCERR(x) ((x) << S_TCAMCRCERR) +#define F_TCAMCRCERR V_TCAMCRCERR(1U) + +#define S_TCAMINTPERR 13 +#define V_TCAMINTPERR(x) ((x) << S_TCAMINTPERR) +#define F_TCAMINTPERR V_TCAMINTPERR(1U) + +#define S_VFSRAMPERR 12 +#define V_VFSRAMPERR(x) ((x) << S_VFSRAMPERR) +#define F_VFSRAMPERR V_VFSRAMPERR(1U) + +#define S_SRVSRAMPERR 11 +#define V_SRVSRAMPERR(x) ((x) << S_SRVSRAMPERR) +#define F_SRVSRAMPERR V_SRVSRAMPERR(1U) + +#define S_SSRAMINTPERR 10 +#define V_SSRAMINTPERR(x) ((x) << S_SSRAMINTPERR) +#define F_SSRAMINTPERR V_SSRAMINTPERR(1U) + +#define S_CLCAMINTPERR 9 +#define V_CLCAMINTPERR(x) ((x) << S_CLCAMINTPERR) +#define F_CLCAMINTPERR V_CLCAMINTPERR(1U) + +#define S_CLCAMCRCPARERR 8 +#define V_CLCAMCRCPARERR(x) ((x) << S_CLCAMCRCPARERR) +#define F_CLCAMCRCPARERR V_CLCAMCRCPARERR(1U) + +#define S_HASHTBLACCFAIL 7 +#define V_HASHTBLACCFAIL(x) ((x) << S_HASHTBLACCFAIL) +#define F_HASHTBLACCFAIL V_HASHTBLACCFAIL(1U) + +#define S_TCAMACCFAIL 6 +#define V_TCAMACCFAIL(x) ((x) << S_TCAMACCFAIL) +#define F_TCAMACCFAIL V_TCAMACCFAIL(1U) + +#define S_SRVSRAMACCFAIL 5 +#define V_SRVSRAMACCFAIL(x) ((x) << S_SRVSRAMACCFAIL) +#define F_SRVSRAMACCFAIL V_SRVSRAMACCFAIL(1U) + +#define S_CLIPTCAMACCFAIL 4 +#define V_CLIPTCAMACCFAIL(x) ((x) << S_CLIPTCAMACCFAIL) +#define 
F_CLIPTCAMACCFAIL V_CLIPTCAMACCFAIL(1U) + +#define S_T6_UNKNOWNCMD 3 +#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD) +#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U) + +#define S_T6_LIP0 2 +#define V_T6_LIP0(x) ((x) << S_T6_LIP0) +#define F_T6_LIP0 V_T6_LIP0(1U) + +#define S_T6_LIPMISS 1 +#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS) +#define F_T6_LIPMISS V_T6_LIPMISS(1U) + +#define S_PIPELINEERR 0 +#define V_PIPELINEERR(x) ((x) << S_PIPELINEERR) +#define F_PIPELINEERR V_PIPELINEERR(1U) + #define A_LE_DB_INT_CAUSE 0x19c3c + +#define S_T6_ACTRGNFULL 21 +#define V_T6_ACTRGNFULL(x) ((x) << S_T6_ACTRGNFULL) +#define F_T6_ACTRGNFULL V_T6_ACTRGNFULL(1U) + +#define S_T6_ACTCNTIPV6TZERO 20 +#define V_T6_ACTCNTIPV6TZERO(x) ((x) << S_T6_ACTCNTIPV6TZERO) +#define F_T6_ACTCNTIPV6TZERO V_T6_ACTCNTIPV6TZERO(1U) + +#define S_T6_ACTCNTIPV4TZERO 19 +#define V_T6_ACTCNTIPV4TZERO(x) ((x) << S_T6_ACTCNTIPV4TZERO) +#define F_T6_ACTCNTIPV4TZERO V_T6_ACTCNTIPV4TZERO(1U) + +#define S_T6_ACTCNTIPV6ZERO 18 +#define V_T6_ACTCNTIPV6ZERO(x) ((x) << S_T6_ACTCNTIPV6ZERO) +#define F_T6_ACTCNTIPV6ZERO V_T6_ACTCNTIPV6ZERO(1U) + +#define S_T6_ACTCNTIPV4ZERO 17 +#define V_T6_ACTCNTIPV4ZERO(x) ((x) << S_T6_ACTCNTIPV4ZERO) +#define F_T6_ACTCNTIPV4ZERO V_T6_ACTCNTIPV4ZERO(1U) + +#define S_T6_UNKNOWNCMD 3 +#define V_T6_UNKNOWNCMD(x) ((x) << S_T6_UNKNOWNCMD) +#define F_T6_UNKNOWNCMD V_T6_UNKNOWNCMD(1U) + +#define S_T6_LIP0 2 +#define V_T6_LIP0(x) ((x) << S_T6_LIP0) +#define F_T6_LIP0 V_T6_LIP0(1U) + +#define S_T6_LIPMISS 1 +#define V_T6_LIPMISS(x) ((x) << S_T6_LIPMISS) +#define F_T6_LIPMISS V_T6_LIPMISS(1U) + #define A_LE_DB_INT_TID 0x19c40 #define S_INTTID 0 @@ -28049,6 +39082,18 @@ #define V_INTTID(x) ((x) << S_INTTID) #define G_INTTID(x) (((x) >> S_INTTID) & M_INTTID) +#define A_LE_DB_DBG_MATCH_CMD_IDX_MASK 0x19c40 + +#define S_CMD_CMP_MASK 20 +#define M_CMD_CMP_MASK 0x1fU +#define V_CMD_CMP_MASK(x) ((x) << S_CMD_CMP_MASK) +#define G_CMD_CMP_MASK(x) (((x) >> S_CMD_CMP_MASK) & M_CMD_CMP_MASK) + 
+#define S_TID_CMP_MASK 0 +#define M_TID_CMP_MASK 0xfffffU +#define V_TID_CMP_MASK(x) ((x) << S_TID_CMP_MASK) +#define G_TID_CMP_MASK(x) (((x) >> S_TID_CMP_MASK) & M_TID_CMP_MASK) + #define A_LE_DB_INT_PTID 0x19c44 #define S_INTPTID 0 @@ -28056,6 +39101,18 @@ #define V_INTPTID(x) ((x) << S_INTPTID) #define G_INTPTID(x) (((x) >> S_INTPTID) & M_INTPTID) +#define A_LE_DB_DBG_MATCH_CMD_IDX_DATA 0x19c44 + +#define S_CMD_CMP 20 +#define M_CMD_CMP 0x1fU +#define V_CMD_CMP(x) ((x) << S_CMD_CMP) +#define G_CMD_CMP(x) (((x) >> S_CMD_CMP) & M_CMD_CMP) + +#define S_TID_CMP 0 +#define M_TID_CMP 0xfffffU +#define V_TID_CMP(x) ((x) << S_TID_CMP) +#define G_TID_CMP(x) (((x) >> S_TID_CMP) & M_TID_CMP) + #define A_LE_DB_INT_INDEX 0x19c48 #define S_INTINDEX 0 @@ -28063,6 +39120,23 @@ #define V_INTINDEX(x) ((x) << S_INTINDEX) #define G_INTINDEX(x) (((x) >> S_INTINDEX) & M_INTINDEX) +#define A_LE_DB_ERR_CMD_TID 0x19c48 + +#define S_ERR_CID 22 +#define M_ERR_CID 0xffU +#define V_ERR_CID(x) ((x) << S_ERR_CID) +#define G_ERR_CID(x) (((x) >> S_ERR_CID) & M_ERR_CID) + +#define S_ERR_PROT 20 +#define M_ERR_PROT 0x3U +#define V_ERR_PROT(x) ((x) << S_ERR_PROT) +#define G_ERR_PROT(x) (((x) >> S_ERR_PROT) & M_ERR_PROT) + +#define S_ERR_TID 0 +#define M_ERR_TID 0xfffffU +#define V_ERR_TID(x) ((x) << S_ERR_TID) +#define G_ERR_TID(x) (((x) >> S_ERR_TID) & M_ERR_TID) + #define A_LE_DB_INT_CMD 0x19c4c #define S_INTCMD 0 @@ -28072,6 +39146,132 @@ #define A_LE_DB_MASK_IPV4 0x19c50 #define A_LE_T5_DB_MASK_IPV4 0x19c50 +#define A_LE_DB_DBG_MATCH_DATA_MASK 0x19c50 +#define A_LE_DB_MAX_NUM_HASH_ENTRIES 0x19c70 + +#define S_MAX_HASH_ENTS 0 +#define M_MAX_HASH_ENTS 0xfffffU +#define V_MAX_HASH_ENTS(x) ((x) << S_MAX_HASH_ENTS) +#define G_MAX_HASH_ENTS(x) (((x) >> S_MAX_HASH_ENTS) & M_MAX_HASH_ENTS) + +#define A_LE_DB_RSP_CODE_0 0x19c74 + +#define S_SUCCESS 25 +#define M_SUCCESS 0x1fU +#define V_SUCCESS(x) ((x) << S_SUCCESS) +#define G_SUCCESS(x) (((x) >> S_SUCCESS) & M_SUCCESS) + +#define S_TCAM_ACTV_SUCC 20 
+#define M_TCAM_ACTV_SUCC 0x1fU +#define V_TCAM_ACTV_SUCC(x) ((x) << S_TCAM_ACTV_SUCC) +#define G_TCAM_ACTV_SUCC(x) (((x) >> S_TCAM_ACTV_SUCC) & M_TCAM_ACTV_SUCC) + +#define S_HASH_ACTV_SUCC 15 +#define M_HASH_ACTV_SUCC 0x1fU +#define V_HASH_ACTV_SUCC(x) ((x) << S_HASH_ACTV_SUCC) +#define G_HASH_ACTV_SUCC(x) (((x) >> S_HASH_ACTV_SUCC) & M_HASH_ACTV_SUCC) + +#define S_TCAM_SRVR_HIT 10 +#define M_TCAM_SRVR_HIT 0x1fU +#define V_TCAM_SRVR_HIT(x) ((x) << S_TCAM_SRVR_HIT) +#define G_TCAM_SRVR_HIT(x) (((x) >> S_TCAM_SRVR_HIT) & M_TCAM_SRVR_HIT) + +#define S_SRAM_SRVR_HIT 5 +#define M_SRAM_SRVR_HIT 0x1fU +#define V_SRAM_SRVR_HIT(x) ((x) << S_SRAM_SRVR_HIT) +#define G_SRAM_SRVR_HIT(x) (((x) >> S_SRAM_SRVR_HIT) & M_SRAM_SRVR_HIT) + +#define S_TCAM_ACTV_HIT 0 +#define M_TCAM_ACTV_HIT 0x1fU +#define V_TCAM_ACTV_HIT(x) ((x) << S_TCAM_ACTV_HIT) +#define G_TCAM_ACTV_HIT(x) (((x) >> S_TCAM_ACTV_HIT) & M_TCAM_ACTV_HIT) + +#define A_LE_DB_RSP_CODE_1 0x19c78 + +#define S_HASH_ACTV_HIT 25 +#define M_HASH_ACTV_HIT 0x1fU +#define V_HASH_ACTV_HIT(x) ((x) << S_HASH_ACTV_HIT) +#define G_HASH_ACTV_HIT(x) (((x) >> S_HASH_ACTV_HIT) & M_HASH_ACTV_HIT) + +#define S_T6_MISS 20 +#define M_T6_MISS 0x1fU +#define V_T6_MISS(x) ((x) << S_T6_MISS) +#define G_T6_MISS(x) (((x) >> S_T6_MISS) & M_T6_MISS) + +#define S_NORM_FILT_HIT 15 +#define M_NORM_FILT_HIT 0x1fU +#define V_NORM_FILT_HIT(x) ((x) << S_NORM_FILT_HIT) +#define G_NORM_FILT_HIT(x) (((x) >> S_NORM_FILT_HIT) & M_NORM_FILT_HIT) + +#define S_HPRI_FILT_HIT 10 +#define M_HPRI_FILT_HIT 0x1fU +#define V_HPRI_FILT_HIT(x) ((x) << S_HPRI_FILT_HIT) +#define G_HPRI_FILT_HIT(x) (((x) >> S_HPRI_FILT_HIT) & M_HPRI_FILT_HIT) + +#define S_ACTV_OPEN_ERR 5 +#define M_ACTV_OPEN_ERR 0x1fU +#define V_ACTV_OPEN_ERR(x) ((x) << S_ACTV_OPEN_ERR) +#define G_ACTV_OPEN_ERR(x) (((x) >> S_ACTV_OPEN_ERR) & M_ACTV_OPEN_ERR) + +#define S_ACTV_FULL_ERR 0 +#define M_ACTV_FULL_ERR 0x1fU +#define V_ACTV_FULL_ERR(x) ((x) << S_ACTV_FULL_ERR) +#define G_ACTV_FULL_ERR(x) (((x) >> 
S_ACTV_FULL_ERR) & M_ACTV_FULL_ERR) + +#define A_LE_DB_RSP_CODE_2 0x19c7c + +#define S_SRCH_RGN_HIT 25 +#define M_SRCH_RGN_HIT 0x1fU +#define V_SRCH_RGN_HIT(x) ((x) << S_SRCH_RGN_HIT) +#define G_SRCH_RGN_HIT(x) (((x) >> S_SRCH_RGN_HIT) & M_SRCH_RGN_HIT) + +#define S_CLIP_FAIL 20 +#define M_CLIP_FAIL 0x1fU +#define V_CLIP_FAIL(x) ((x) << S_CLIP_FAIL) +#define G_CLIP_FAIL(x) (((x) >> S_CLIP_FAIL) & M_CLIP_FAIL) + +#define S_LIP_ZERO_ERR 15 +#define M_LIP_ZERO_ERR 0x1fU +#define V_LIP_ZERO_ERR(x) ((x) << S_LIP_ZERO_ERR) +#define G_LIP_ZERO_ERR(x) (((x) >> S_LIP_ZERO_ERR) & M_LIP_ZERO_ERR) + +#define S_UNKNOWN_CMD 10 +#define M_UNKNOWN_CMD 0x1fU +#define V_UNKNOWN_CMD(x) ((x) << S_UNKNOWN_CMD) +#define G_UNKNOWN_CMD(x) (((x) >> S_UNKNOWN_CMD) & M_UNKNOWN_CMD) + +#define S_CMD_TID_ERR 5 +#define M_CMD_TID_ERR 0x1fU +#define V_CMD_TID_ERR(x) ((x) << S_CMD_TID_ERR) +#define G_CMD_TID_ERR(x) (((x) >> S_CMD_TID_ERR) & M_CMD_TID_ERR) + +#define S_INTERNAL_ERR 0 +#define M_INTERNAL_ERR 0x1fU +#define V_INTERNAL_ERR(x) ((x) << S_INTERNAL_ERR) +#define G_INTERNAL_ERR(x) (((x) >> S_INTERNAL_ERR) & M_INTERNAL_ERR) + +#define A_LE_DB_RSP_CODE_3 0x19c80 + +#define S_SRAM_SRVR_HIT_ACTF 25 +#define M_SRAM_SRVR_HIT_ACTF 0x1fU +#define V_SRAM_SRVR_HIT_ACTF(x) ((x) << S_SRAM_SRVR_HIT_ACTF) +#define G_SRAM_SRVR_HIT_ACTF(x) (((x) >> S_SRAM_SRVR_HIT_ACTF) & M_SRAM_SRVR_HIT_ACTF) + +#define S_TCAM_SRVR_HIT_ACTF 20 +#define M_TCAM_SRVR_HIT_ACTF 0x1fU +#define V_TCAM_SRVR_HIT_ACTF(x) ((x) << S_TCAM_SRVR_HIT_ACTF) +#define G_TCAM_SRVR_HIT_ACTF(x) (((x) >> S_TCAM_SRVR_HIT_ACTF) & M_TCAM_SRVR_HIT_ACTF) + +#define S_INVLDRD 15 +#define M_INVLDRD 0x1fU +#define V_INVLDRD(x) ((x) << S_INVLDRD) +#define G_INVLDRD(x) (((x) >> S_INVLDRD) & M_INVLDRD) + +#define S_TUPLZERO 10 +#define M_TUPLZERO 0x1fU +#define V_TUPLZERO(x) ((x) << S_TUPLZERO) +#define G_TUPLZERO(x) (((x) >> S_TUPLZERO) & M_TUPLZERO) + #define A_LE_DB_ACT_CNT_IPV4_TCAM 0x19c94 #define A_LE_DB_ACT_CNT_IPV6_TCAM 0x19c98 #define 
A_LE_ACT_CNT_THRSH 0x19c9c @@ -28082,8 +39282,19 @@ #define G_ACT_CNT_THRSH(x) (((x) >> S_ACT_CNT_THRSH) & M_ACT_CNT_THRSH) #define A_LE_DB_MASK_IPV6 0x19ca0 +#define A_LE_DB_DBG_MATCH_DATA 0x19ca0 #define A_LE_DB_REQ_RSP_CNT 0x19ce4 +#define S_T4_RSPCNT 16 +#define M_T4_RSPCNT 0xffffU +#define V_T4_RSPCNT(x) ((x) << S_T4_RSPCNT) +#define G_T4_RSPCNT(x) (((x) >> S_T4_RSPCNT) & M_T4_RSPCNT) + +#define S_T4_REQCNT 0 +#define M_T4_REQCNT 0xffffU +#define V_T4_REQCNT(x) ((x) << S_T4_REQCNT) +#define G_T4_REQCNT(x) (((x) >> S_T4_REQCNT) & M_T4_REQCNT) + #define S_RSPCNTLE 16 #define M_RSPCNTLE 0xffffU #define V_RSPCNTLE(x) ((x) << S_RSPCNTLE) @@ -28151,6 +39362,14 @@ #define V_DBGICMDMODE(x) ((x) << S_DBGICMDMODE) #define G_DBGICMDMODE(x) (((x) >> S_DBGICMDMODE) & M_DBGICMDMODE) +#define S_DBGICMDMSKREAD 21 +#define V_DBGICMDMSKREAD(x) ((x) << S_DBGICMDMSKREAD) +#define F_DBGICMDMSKREAD V_DBGICMDMSKREAD(1U) + +#define S_DBGICMDWRITE 17 +#define V_DBGICMDWRITE(x) ((x) << S_DBGICMDWRITE) +#define F_DBGICMDWRITE V_DBGICMDWRITE(1U) + #define A_LE_DB_DBGI_REQ_TCAM_CMD 0x19cf4 #define S_DBGICMD 20 @@ -28163,6 +39382,13 @@ #define V_DBGITINDEX(x) ((x) << S_DBGITINDEX) #define G_DBGITINDEX(x) (((x) >> S_DBGITINDEX) & M_DBGITINDEX) +#define A_LE_DB_DBGI_REQ_CMD 0x19cf4 + +#define S_DBGITID 0 +#define M_DBGITID 0xfffffU +#define V_DBGITID(x) ((x) << S_DBGITID) +#define G_DBGITID(x) (((x) >> S_DBGITID) & M_DBGITID) + #define A_LE_PERR_ENABLE 0x19cf8 #define S_REQQUEUE 1 @@ -28189,6 +39415,23 @@ #define V_TCAMLE(x) ((x) << S_TCAMLE) #define F_TCAMLE V_TCAMLE(1U) +#define S_BKCHKPERIOD 22 +#define M_BKCHKPERIOD 0x3ffU +#define V_BKCHKPERIOD(x) ((x) << S_BKCHKPERIOD) +#define G_BKCHKPERIOD(x) (((x) >> S_BKCHKPERIOD) & M_BKCHKPERIOD) + +#define S_TCAMBKCHKEN 21 +#define V_TCAMBKCHKEN(x) ((x) << S_TCAMBKCHKEN) +#define F_TCAMBKCHKEN V_TCAMBKCHKEN(1U) + +#define S_T6_CLCAMFIFOERR 2 +#define V_T6_CLCAMFIFOERR(x) ((x) << S_T6_CLCAMFIFOERR) +#define F_T6_CLCAMFIFOERR V_T6_CLCAMFIFOERR(1U) 
+ +#define S_T6_HASHTBLMEMCRCERR 1 +#define V_T6_HASHTBLMEMCRCERR(x) ((x) << S_T6_HASHTBLMEMCRCERR) +#define F_T6_HASHTBLMEMCRCERR V_T6_HASHTBLMEMCRCERR(1U) + #define A_LE_SPARE 0x19cfc #define A_LE_DB_DBGI_REQ_DATA 0x19d00 #define A_LE_DB_DBGI_REQ_MASK 0x19d50 @@ -28220,6 +39463,16 @@ #define V_DBGIRSPVALID(x) ((x) << S_DBGIRSPVALID) #define F_DBGIRSPVALID V_DBGIRSPVALID(1U) +#define S_DBGIRSPTID 12 +#define M_DBGIRSPTID 0xfffffU +#define V_DBGIRSPTID(x) ((x) << S_DBGIRSPTID) +#define G_DBGIRSPTID(x) (((x) >> S_DBGIRSPTID) & M_DBGIRSPTID) + +#define S_DBGIRSPLEARN 2 +#define V_DBGIRSPLEARN(x) ((x) << S_DBGIRSPLEARN) +#define F_DBGIRSPLEARN V_DBGIRSPLEARN(1U) + +#define A_LE_DBG_SEL 0x19d98 #define A_LE_DB_DBGI_RSP_DATA 0x19da0 #define A_LE_DB_DBGI_RSP_LAST_CMD 0x19de4 @@ -28259,6 +39512,13 @@ #define V_SVRBASE_ADDR(x) ((x) << S_SVRBASE_ADDR) #define G_SVRBASE_ADDR(x) (((x) >> S_SVRBASE_ADDR) & M_SVRBASE_ADDR) +#define A_LE_DB_TCAM_TID_BASE 0x19df0 + +#define S_TCAM_TID_BASE 0 +#define M_TCAM_TID_BASE 0xfffffU +#define V_TCAM_TID_BASE(x) ((x) << S_TCAM_TID_BASE) +#define G_TCAM_TID_BASE(x) (((x) >> S_TCAM_TID_BASE) & M_TCAM_TID_BASE) + #define A_LE_DB_FTID_FLTRBASE 0x19df4 #define S_FLTRBASE_ADDR 2 @@ -28266,6 +39526,13 @@ #define V_FLTRBASE_ADDR(x) ((x) << S_FLTRBASE_ADDR) #define G_FLTRBASE_ADDR(x) (((x) >> S_FLTRBASE_ADDR) & M_FLTRBASE_ADDR) +#define A_LE_DB_CLCAM_TID_BASE 0x19df4 + +#define S_CLCAM_TID_BASE 0 +#define M_CLCAM_TID_BASE 0xfffffU +#define V_CLCAM_TID_BASE(x) ((x) << S_CLCAM_TID_BASE) +#define G_CLCAM_TID_BASE(x) (((x) >> S_CLCAM_TID_BASE) & M_CLCAM_TID_BASE) + #define A_LE_DB_TID_HASHBASE 0x19df8 #define S_HASHBASE_ADDR 2 @@ -28273,6 +39540,13 @@ #define V_HASHBASE_ADDR(x) ((x) << S_HASHBASE_ADDR) #define G_HASHBASE_ADDR(x) (((x) >> S_HASHBASE_ADDR) & M_HASHBASE_ADDR) +#define A_T6_LE_DB_HASH_TID_BASE 0x19df8 + +#define S_HASH_TID_BASE 0 +#define M_HASH_TID_BASE 0xfffffU +#define V_HASH_TID_BASE(x) ((x) << S_HASH_TID_BASE) +#define 
G_HASH_TID_BASE(x) (((x) >> S_HASH_TID_BASE) & M_HASH_TID_BASE) + #define A_LE_PERR_INJECT 0x19dfc #define S_LEMEMSEL 1 @@ -28280,6 +39554,13 @@ #define V_LEMEMSEL(x) ((x) << S_LEMEMSEL) #define G_LEMEMSEL(x) (((x) >> S_LEMEMSEL) & M_LEMEMSEL) +#define A_LE_DB_SSRAM_TID_BASE 0x19dfc + +#define S_SSRAM_TID_BASE 0 +#define M_SSRAM_TID_BASE 0xfffffU +#define V_SSRAM_TID_BASE(x) ((x) << S_SSRAM_TID_BASE) +#define G_SSRAM_TID_BASE(x) (((x) >> S_SSRAM_TID_BASE) & M_SSRAM_TID_BASE) + #define A_LE_DB_ACTIVE_MASK_IPV4 0x19e00 #define A_LE_T5_DB_ACTIVE_MASK_IPV4 0x19e00 #define A_LE_DB_ACTIVE_MASK_IPV6 0x19e50 @@ -28287,13 +39568,18 @@ #define A_LE_HASH_MASK_GEN_IPV4T5 0x19ea0 #define A_LE_HASH_MASK_GEN_IPV6 0x19eb0 #define A_LE_HASH_MASK_GEN_IPV6T5 0x19eb4 +#define A_T6_LE_HASH_MASK_GEN_IPV6T5 0x19ec4 #define A_LE_HASH_MASK_CMP_IPV4 0x19ee0 #define A_LE_HASH_MASK_CMP_IPV4T5 0x19ee4 +#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV4 0x19ee4 #define A_LE_HASH_MASK_CMP_IPV6 0x19ef0 +#define A_LE_DB_PSV_FILTER_MASK_FLT_IPV4 0x19ef0 #define A_LE_HASH_MASK_CMP_IPV6T5 0x19ef8 +#define A_LE_DB_PSV_FILTER_MASK_TUP_IPV6 0x19f04 #define A_LE_DEBUG_LA_CONFIG 0x19f20 #define A_LE_REQ_DEBUG_LA_DATA 0x19f24 #define A_LE_REQ_DEBUG_LA_WRPTR 0x19f28 +#define A_LE_DB_PSV_FILTER_MASK_FLT_IPV6 0x19f28 #define A_LE_RSP_DEBUG_LA_DATA 0x19f2c #define A_LE_RSP_DEBUG_LA_WRPTR 0x19f30 #define A_LE_DEBUG_LA_SELECTOR 0x19f34 @@ -28312,6 +39598,20 @@ #define V_SRVRINIT(x) ((x) << S_SRVRINIT) #define F_SRVRINIT V_SRVRINIT(1U) +#define A_LE_DB_SRVR_SRAM_CONFIG 0x19f34 + +#define S_PRI_HFILT 4 +#define V_PRI_HFILT(x) ((x) << S_PRI_HFILT) +#define F_PRI_HFILT V_PRI_HFILT(1U) + +#define S_PRI_SRVR 3 +#define V_PRI_SRVR(x) ((x) << S_PRI_SRVR) +#define F_PRI_SRVR V_PRI_SRVR(1U) + +#define S_PRI_FILT 2 +#define V_PRI_FILT(x) ((x) << S_PRI_FILT) +#define F_PRI_FILT V_PRI_FILT(1U) + #define A_LE_DEBUG_LA_CAPTURED_DATA 0x19f38 #define A_LE_SRVR_VF_SRCH_TABLE 0x19f38 @@ -28334,7 +39634,38 @@ #define V_SRCHLADDR(x) ((x) << 
S_SRCHLADDR) #define G_SRCHLADDR(x) (((x) >> S_SRCHLADDR) & M_SRCHLADDR) +#define A_LE_DB_SRVR_VF_SRCH_TABLE_CTRL 0x19f38 + +#define S_VFLUTBUSY 10 +#define V_VFLUTBUSY(x) ((x) << S_VFLUTBUSY) +#define F_VFLUTBUSY V_VFLUTBUSY(1U) + +#define S_VFLUTSTART 9 +#define V_VFLUTSTART(x) ((x) << S_VFLUTSTART) +#define F_VFLUTSTART V_VFLUTSTART(1U) + +#define S_T6_RDWR 8 +#define V_T6_RDWR(x) ((x) << S_T6_RDWR) +#define F_T6_RDWR V_T6_RDWR(1U) + +#define S_T6_VFINDEX 0 +#define M_T6_VFINDEX 0xffU +#define V_T6_VFINDEX(x) ((x) << S_T6_VFINDEX) +#define G_T6_VFINDEX(x) (((x) >> S_T6_VFINDEX) & M_T6_VFINDEX) + #define A_LE_MA_DEBUG_LA_DATA 0x19f3c +#define A_LE_DB_SRVR_VF_SRCH_TABLE_DATA 0x19f3c + +#define S_T6_SRCHHADDR 12 +#define M_T6_SRCHHADDR 0xfffU +#define V_T6_SRCHHADDR(x) ((x) << S_T6_SRCHHADDR) +#define G_T6_SRCHHADDR(x) (((x) >> S_T6_SRCHHADDR) & M_T6_SRCHHADDR) + +#define S_T6_SRCHLADDR 0 +#define M_T6_SRCHLADDR 0xfffU +#define V_T6_SRCHLADDR(x) ((x) << S_T6_SRCHLADDR) +#define G_T6_SRCHLADDR(x) (((x) >> S_T6_SRCHLADDR) & M_T6_SRCHLADDR) + #define A_LE_RSP_DEBUG_LA_HASH_WRPTR 0x19f40 #define A_LE_DB_SECOND_ACTIVE_MASK_IPV4 0x19f40 #define A_LE_HASH_DEBUG_LA_DATA 0x19f44 @@ -29494,6 +40825,11 @@ #define V_RXSOP(x) ((x) << S_RXSOP) #define G_RXSOP(x) (((x) >> S_RXSOP) & M_RXSOP) +#define S_T4_RXEOP 0 +#define M_T4_RXEOP 0xffU +#define V_T4_RXEOP(x) ((x) << S_T4_RXEOP) +#define G_T4_RXEOP(x) (((x) >> S_T4_RXEOP) & M_T4_RXEOP) + #define A_XGMAC_PORT_LINK_STATUS 0x1034 #define S_REMFLT 3 @@ -32859,6 +44195,10 @@ #define V_QUEBAREADDR(x) ((x) << S_QUEBAREADDR) #define F_QUEBAREADDR V_QUEBAREADDR(1U) +#define S_QUE1KEN 6 +#define V_QUE1KEN(x) ((x) << S_QUE1KEN) +#define F_QUE1KEN V_QUE1KEN(1U) + #define A_UP_IBQ_0_REALADDR 0xd4 #define S_QUERDADDRWRAP 31 @@ -32998,6 +44338,11 @@ #define V_T5_UPRID(x) ((x) << S_T5_UPRID) #define G_T5_UPRID(x) (((x) >> S_T5_UPRID) & M_T5_UPRID) +#define S_T6_UPRID 0 +#define M_T6_UPRID 0x1ffU +#define V_T6_UPRID(x) ((x) << S_T6_UPRID) 
+#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID) + #define A_UP_UP_SELF_CONTROL 0x14c #define S_UPSELFRESET 0 @@ -33063,6 +44408,18 @@ #define V_TSCHCHNLCCNT(x) ((x) << S_TSCHCHNLCCNT) #define G_TSCHCHNLCCNT(x) (((x) >> S_TSCHCHNLCCNT) & M_TSCHCHNLCCNT) +#define S_TSCHCHNLCHDIS 31 +#define V_TSCHCHNLCHDIS(x) ((x) << S_TSCHCHNLCHDIS) +#define F_TSCHCHNLCHDIS V_TSCHCHNLCHDIS(1U) + +#define S_TSCHCHNLWDIS 30 +#define V_TSCHCHNLWDIS(x) ((x) << S_TSCHCHNLWDIS) +#define F_TSCHCHNLWDIS V_TSCHCHNLWDIS(1U) + +#define S_TSCHCHNLCLDIS 29 +#define V_TSCHCHNLCLDIS(x) ((x) << S_TSCHCHNLCLDIS) +#define F_TSCHCHNLCLDIS V_TSCHCHNLCLDIS(1U) + #define A_UP_UPLADBGPCCHKDATA_0 0x240 #define A_UP_UPLADBGPCCHKMASK_0 0x244 #define A_UP_UPLADBGPCCHKDATA_1 0x250 @@ -33194,6 +44551,14 @@ #define V_PREFEN(x) ((x) << S_PREFEN) #define F_PREFEN V_PREFEN(1U) +#define S_DISSLOWTIMEOUT 14 +#define V_DISSLOWTIMEOUT(x) ((x) << S_DISSLOWTIMEOUT) +#define F_DISSLOWTIMEOUT V_DISSLOWTIMEOUT(1U) + +#define S_INTLRSPEN 9 +#define V_INTLRSPEN(x) ((x) << S_INTLRSPEN) +#define F_INTLRSPEN V_INTLRSPEN(1U) + #define A_CIM_CTL_PREFADDR 0x4 #define A_CIM_CTL_ALLOCADDR 0x8 #define A_CIM_CTL_INVLDTADDR 0xc @@ -33291,6 +44656,10 @@ #define V_TSCHNRESET(x) ((x) << S_TSCHNRESET) #define F_TSCHNRESET V_TSCHNRESET(1U) +#define S_T6_MIN_MAX_EN 29 +#define V_T6_MIN_MAX_EN(x) ((x) << S_T6_MIN_MAX_EN) +#define F_T6_MIN_MAX_EN V_T6_MIN_MAX_EN(1U) + #define A_CIM_CTL_TSCH_CHNLN_TICK 0x904 #define S_TSCHNLTICK 0 @@ -33298,6 +44667,72 @@ #define V_TSCHNLTICK(x) ((x) << S_TSCHNLTICK) #define G_TSCHNLTICK(x) (((x) >> S_TSCHNLTICK) & M_TSCHNLTICK) +#define A_CIM_CTL_TSCH_CHNLN_CLASS_RATECTL 0x904 + +#define S_TSC15RATECTL 15 +#define V_TSC15RATECTL(x) ((x) << S_TSC15RATECTL) +#define F_TSC15RATECTL V_TSC15RATECTL(1U) + +#define S_TSC14RATECTL 14 +#define V_TSC14RATECTL(x) ((x) << S_TSC14RATECTL) +#define F_TSC14RATECTL V_TSC14RATECTL(1U) + +#define S_TSC13RATECTL 13 +#define V_TSC13RATECTL(x) ((x) << S_TSC13RATECTL) 
+#define F_TSC13RATECTL V_TSC13RATECTL(1U) + +#define S_TSC12RATECTL 12 +#define V_TSC12RATECTL(x) ((x) << S_TSC12RATECTL) +#define F_TSC12RATECTL V_TSC12RATECTL(1U) + +#define S_TSC11RATECTL 11 +#define V_TSC11RATECTL(x) ((x) << S_TSC11RATECTL) +#define F_TSC11RATECTL V_TSC11RATECTL(1U) + +#define S_TSC10RATECTL 10 +#define V_TSC10RATECTL(x) ((x) << S_TSC10RATECTL) +#define F_TSC10RATECTL V_TSC10RATECTL(1U) + +#define S_TSC9RATECTL 9 +#define V_TSC9RATECTL(x) ((x) << S_TSC9RATECTL) +#define F_TSC9RATECTL V_TSC9RATECTL(1U) + +#define S_TSC8RATECTL 8 +#define V_TSC8RATECTL(x) ((x) << S_TSC8RATECTL) +#define F_TSC8RATECTL V_TSC8RATECTL(1U) + +#define S_TSC7RATECTL 7 +#define V_TSC7RATECTL(x) ((x) << S_TSC7RATECTL) +#define F_TSC7RATECTL V_TSC7RATECTL(1U) + +#define S_TSC6RATECTL 6 +#define V_TSC6RATECTL(x) ((x) << S_TSC6RATECTL) +#define F_TSC6RATECTL V_TSC6RATECTL(1U) + +#define S_TSC5RATECTL 5 +#define V_TSC5RATECTL(x) ((x) << S_TSC5RATECTL) +#define F_TSC5RATECTL V_TSC5RATECTL(1U) + +#define S_TSC4RATECTL 4 +#define V_TSC4RATECTL(x) ((x) << S_TSC4RATECTL) +#define F_TSC4RATECTL V_TSC4RATECTL(1U) + +#define S_TSC3RATECTL 3 +#define V_TSC3RATECTL(x) ((x) << S_TSC3RATECTL) +#define F_TSC3RATECTL V_TSC3RATECTL(1U) + +#define S_TSC2RATECTL 2 +#define V_TSC2RATECTL(x) ((x) << S_TSC2RATECTL) +#define F_TSC2RATECTL V_TSC2RATECTL(1U) + +#define S_TSC1RATECTL 1 +#define V_TSC1RATECTL(x) ((x) << S_TSC1RATECTL) +#define F_TSC1RATECTL V_TSC1RATECTL(1U) + +#define S_TSC0RATECTL 0 +#define V_TSC0RATECTL(x) ((x) << S_TSC0RATECTL) +#define F_TSC0RATECTL V_TSC0RATECTL(1U) + #define A_CIM_CTL_TSCH_CHNLN_CLASS_ENABLE_A 0x908 #define S_TSC15WRREN 31 @@ -33445,6 +44880,15 @@ #define V_TSCHNLRATEL(x) ((x) << S_TSCHNLRATEL) #define G_TSCHNLRATEL(x) (((x) >> S_TSCHNLRATEL) & M_TSCHNLRATEL) +#define S_TSCHNLRATEPROT 30 +#define V_TSCHNLRATEPROT(x) ((x) << S_TSCHNLRATEPROT) +#define F_TSCHNLRATEPROT V_TSCHNLRATEPROT(1U) + +#define S_T6_TSCHNLRATEL 0 +#define M_T6_TSCHNLRATEL 0x3fffffffU 
+#define V_T6_TSCHNLRATEL(x) ((x) << S_T6_TSCHNLRATEL) +#define G_T6_TSCHNLRATEL(x) (((x) >> S_T6_TSCHNLRATEL) & M_T6_TSCHNLRATEL) + #define A_CIM_CTL_TSCH_CHNLN_RATE_PROPERTIES 0x914 #define S_TSCHNLRMAX 16 @@ -33457,6 +44901,16 @@ #define V_TSCHNLRINCR(x) ((x) << S_TSCHNLRINCR) #define G_TSCHNLRINCR(x) (((x) >> S_TSCHNLRINCR) & M_TSCHNLRINCR) +#define S_TSCHNLRTSEL 14 +#define M_TSCHNLRTSEL 0x3U +#define V_TSCHNLRTSEL(x) ((x) << S_TSCHNLRTSEL) +#define G_TSCHNLRTSEL(x) (((x) >> S_TSCHNLRTSEL) & M_TSCHNLRTSEL) + +#define S_T6_TSCHNLRINCR 0 +#define M_T6_TSCHNLRINCR 0x3fffU +#define V_T6_TSCHNLRINCR(x) ((x) << S_T6_TSCHNLRINCR) +#define G_T6_TSCHNLRINCR(x) (((x) >> S_T6_TSCHNLRINCR) & M_T6_TSCHNLRINCR) + #define A_CIM_CTL_TSCH_CHNLN_WRR 0x918 #define A_CIM_CTL_TSCH_CHNLN_WEIGHT 0x91c @@ -33476,6 +44930,10 @@ #define V_TSCCLRATEL(x) ((x) << S_TSCCLRATEL) #define G_TSCCLRATEL(x) (((x) >> S_TSCCLRATEL) & M_TSCCLRATEL) +#define S_TSCCLRATEPROT 30 +#define V_TSCCLRATEPROT(x) ((x) << S_TSCCLRATEPROT) +#define F_TSCCLRATEPROT V_TSCCLRATEPROT(1U) + #define A_CIM_CTL_TSCH_CHNLN_CLASSM_RATE_PROPERTIES 0x924 #define S_TSCCLRMAX 16 @@ -33488,6 +44946,16 @@ #define V_TSCCLRINCR(x) ((x) << S_TSCCLRINCR) #define G_TSCCLRINCR(x) (((x) >> S_TSCCLRINCR) & M_TSCCLRINCR) +#define S_TSCCLRTSEL 14 +#define M_TSCCLRTSEL 0x3U +#define V_TSCCLRTSEL(x) ((x) << S_TSCCLRTSEL) +#define G_TSCCLRTSEL(x) (((x) >> S_TSCCLRTSEL) & M_TSCCLRTSEL) + +#define S_T6_TSCCLRINCR 0 +#define M_T6_TSCCLRINCR 0x3fffU +#define V_T6_TSCCLRINCR(x) ((x) << S_T6_TSCCLRINCR) +#define G_T6_TSCCLRINCR(x) (((x) >> S_T6_TSCCLRINCR) & M_T6_TSCCLRINCR) + #define A_CIM_CTL_TSCH_CHNLN_CLASSM_WRR 0x928 #define S_TSCCLWRRNEG 31 @@ -33499,6 +44967,10 @@ #define V_TSCCLWRR(x) ((x) << S_TSCCLWRR) #define G_TSCCLWRR(x) (((x) >> S_TSCCLWRR) & M_TSCCLWRR) +#define S_TSCCLWRRPROT 30 +#define V_TSCCLWRRPROT(x) ((x) << S_TSCCLWRRPROT) +#define F_TSCCLWRRPROT V_TSCCLWRRPROT(1U) + #define A_CIM_CTL_TSCH_CHNLN_CLASSM_WEIGHT 0x92c #define 
S_TSCCLWEIGHT 0 @@ -33506,14 +44978,33 @@ #define V_TSCCLWEIGHT(x) ((x) << S_TSCCLWEIGHT) #define G_TSCCLWEIGHT(x) (((x) >> S_TSCCLWEIGHT) & M_TSCCLWEIGHT) +#define S_PAUSEVECSEL 28 +#define M_PAUSEVECSEL 0x3U +#define V_PAUSEVECSEL(x) ((x) << S_PAUSEVECSEL) +#define G_PAUSEVECSEL(x) (((x) >> S_PAUSEVECSEL) & M_PAUSEVECSEL) + +#define S_MPSPAUSEMASK 20 +#define M_MPSPAUSEMASK 0xffU +#define V_MPSPAUSEMASK(x) ((x) << S_MPSPAUSEMASK) +#define G_MPSPAUSEMASK(x) (((x) >> S_MPSPAUSEMASK) & M_MPSPAUSEMASK) + +#define A_CIM_CTL_TSCH_TICK0 0xd80 #define A_CIM_CTL_MAILBOX_PF0_CTL 0xd84 +#define A_CIM_CTL_TSCH_TICK1 0xd84 #define A_CIM_CTL_MAILBOX_PF1_CTL 0xd88 +#define A_CIM_CTL_TSCH_TICK2 0xd88 #define A_CIM_CTL_MAILBOX_PF2_CTL 0xd8c +#define A_CIM_CTL_TSCH_TICK3 0xd8c #define A_CIM_CTL_MAILBOX_PF3_CTL 0xd90 +#define A_T6_CIM_CTL_MAILBOX_PF0_CTL 0xd90 #define A_CIM_CTL_MAILBOX_PF4_CTL 0xd94 +#define A_T6_CIM_CTL_MAILBOX_PF1_CTL 0xd94 #define A_CIM_CTL_MAILBOX_PF5_CTL 0xd98 +#define A_T6_CIM_CTL_MAILBOX_PF2_CTL 0xd98 #define A_CIM_CTL_MAILBOX_PF6_CTL 0xd9c +#define A_T6_CIM_CTL_MAILBOX_PF3_CTL 0xd9c #define A_CIM_CTL_MAILBOX_PF7_CTL 0xda0 +#define A_T6_CIM_CTL_MAILBOX_PF4_CTL 0xda0 #define A_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xda4 #define S_PF7_OWNER_PL 15 @@ -33580,6 +45071,7 @@ #define V_PF0_OWNER_UP(x) ((x) << S_PF0_OWNER_UP) #define F_PF0_OWNER_UP V_PF0_OWNER_UP(1U) +#define A_T6_CIM_CTL_MAILBOX_PF5_CTL 0xda4 #define A_CIM_CTL_PIO_MST_CONFIG 0xda8 #define S_T5_CTLRID 0 @@ -33587,6 +45079,42 @@ #define V_T5_CTLRID(x) ((x) << S_T5_CTLRID) #define G_T5_CTLRID(x) (((x) >> S_T5_CTLRID) & M_T5_CTLRID) +#define A_T6_CIM_CTL_MAILBOX_PF6_CTL 0xda8 +#define A_T6_CIM_CTL_MAILBOX_PF7_CTL 0xdac +#define A_T6_CIM_CTL_MAILBOX_CTL_OWNER_COPY 0xdb0 +#define A_T6_CIM_CTL_PIO_MST_CONFIG 0xdb4 + +#define S_T6_UPRID 0 +#define M_T6_UPRID 0x1ffU +#define V_T6_UPRID(x) ((x) << S_T6_UPRID) +#define G_T6_UPRID(x) (((x) >> S_T6_UPRID) & M_T6_UPRID) + +#define A_CIM_CTL_ULP_OBQ0_PAUSE_MASK 0xe00 
+#define A_CIM_CTL_ULP_OBQ1_PAUSE_MASK 0xe04 +#define A_CIM_CTL_ULP_OBQ2_PAUSE_MASK 0xe08 +#define A_CIM_CTL_ULP_OBQ3_PAUSE_MASK 0xe0c +#define A_CIM_CTL_ULP_OBQ_CONFIG 0xe10 + +#define S_CH1_PRIO_EN 1 +#define V_CH1_PRIO_EN(x) ((x) << S_CH1_PRIO_EN) +#define F_CH1_PRIO_EN V_CH1_PRIO_EN(1U) + +#define S_CH0_PRIO_EN 0 +#define V_CH0_PRIO_EN(x) ((x) << S_CH0_PRIO_EN) +#define F_CH0_PRIO_EN V_CH0_PRIO_EN(1U) + +#define A_CIM_CTL_PIF_TIMEOUT 0xe40 + +#define S_SLOW_TIMEOUT 16 +#define M_SLOW_TIMEOUT 0xffffU +#define V_SLOW_TIMEOUT(x) ((x) << S_SLOW_TIMEOUT) +#define G_SLOW_TIMEOUT(x) (((x) >> S_SLOW_TIMEOUT) & M_SLOW_TIMEOUT) + +#define S_MA_TIMEOUT 0 +#define M_MA_TIMEOUT 0xffffU +#define V_MA_TIMEOUT(x) ((x) << S_MA_TIMEOUT) +#define G_MA_TIMEOUT(x) (((x) >> S_MA_TIMEOUT) & M_MA_TIMEOUT) + /* registers for module MAC */ #define MAC_BASE_ADDR 0x0 @@ -33610,6 +45138,52 @@ #define V_PORTSPEED(x) ((x) << S_PORTSPEED) #define G_PORTSPEED(x) (((x) >> S_PORTSPEED) & M_PORTSPEED) +#define S_ENA_ERR_RSP 28 +#define V_ENA_ERR_RSP(x) ((x) << S_ENA_ERR_RSP) +#define F_ENA_ERR_RSP V_ENA_ERR_RSP(1U) + +#define S_DEBUG_CLR 25 +#define V_DEBUG_CLR(x) ((x) << S_DEBUG_CLR) +#define F_DEBUG_CLR V_DEBUG_CLR(1U) + +#define S_PLL_SEL 23 +#define V_PLL_SEL(x) ((x) << S_PLL_SEL) +#define F_PLL_SEL V_PLL_SEL(1U) + +#define S_PORT_MAP 20 +#define M_PORT_MAP 0x7U +#define V_PORT_MAP(x) ((x) << S_PORT_MAP) +#define G_PORT_MAP(x) (((x) >> S_PORT_MAP) & M_PORT_MAP) + +#define S_AEC_PAT_DATA 15 +#define V_AEC_PAT_DATA(x) ((x) << S_AEC_PAT_DATA) +#define F_AEC_PAT_DATA V_AEC_PAT_DATA(1U) + +#define S_MACCLK_SEL 13 +#define V_MACCLK_SEL(x) ((x) << S_MACCLK_SEL) +#define F_MACCLK_SEL V_MACCLK_SEL(1U) + +#define S_XGMII_SEL 12 +#define V_XGMII_SEL(x) ((x) << S_XGMII_SEL) +#define F_XGMII_SEL V_XGMII_SEL(1U) + +#define S_DEBUG_PORT_SEL 10 +#define M_DEBUG_PORT_SEL 0x3U +#define V_DEBUG_PORT_SEL(x) ((x) << S_DEBUG_PORT_SEL) +#define G_DEBUG_PORT_SEL(x) (((x) >> S_DEBUG_PORT_SEL) & M_DEBUG_PORT_SEL) + 
+#define S_ENABLE_25G 7 +#define V_ENABLE_25G(x) ((x) << S_ENABLE_25G) +#define F_ENABLE_25G V_ENABLE_25G(1U) + +#define S_ENABLE_50G 6 +#define V_ENABLE_50G(x) ((x) << S_ENABLE_50G) +#define F_ENABLE_50G V_ENABLE_50G(1U) + +#define S_DEBUG_TX_RX_SEL 1 +#define V_DEBUG_TX_RX_SEL(x) ((x) << S_DEBUG_TX_RX_SEL) +#define F_DEBUG_TX_RX_SEL V_DEBUG_TX_RX_SEL(1U) + #define A_MAC_PORT_RESET_CTRL 0x804 #define S_TWGDSK_HSSC16B 31 @@ -33712,11 +45286,67 @@ #define V_MTIPSD0RXRST(x) ((x) << S_MTIPSD0RXRST) #define F_MTIPSD0RXRST V_MTIPSD0RXRST(1U) +#define S_MAC100G40G_RESET 27 +#define V_MAC100G40G_RESET(x) ((x) << S_MAC100G40G_RESET) +#define F_MAC100G40G_RESET V_MAC100G40G_RESET(1U) + +#define S_MAC10G1G_RESET 26 +#define V_MAC10G1G_RESET(x) ((x) << S_MAC10G1G_RESET) +#define F_MAC10G1G_RESET V_MAC10G1G_RESET(1U) + +#define S_PCS1G_RESET 24 +#define V_PCS1G_RESET(x) ((x) << S_PCS1G_RESET) +#define F_PCS1G_RESET V_PCS1G_RESET(1U) + +#define S_PCS10G_RESET 15 +#define V_PCS10G_RESET(x) ((x) << S_PCS10G_RESET) +#define F_PCS10G_RESET V_PCS10G_RESET(1U) + +#define S_PCS40G_RESET 14 +#define V_PCS40G_RESET(x) ((x) << S_PCS40G_RESET) +#define F_PCS40G_RESET V_PCS40G_RESET(1U) + +#define S_PCS100G_RESET 13 +#define V_PCS100G_RESET(x) ((x) << S_PCS100G_RESET) +#define F_PCS100G_RESET V_PCS100G_RESET(1U) + #define A_MAC_PORT_LED_CFG 0x808 + +#define S_LED1_CFG1 14 +#define M_LED1_CFG1 0x3U +#define V_LED1_CFG1(x) ((x) << S_LED1_CFG1) +#define G_LED1_CFG1(x) (((x) >> S_LED1_CFG1) & M_LED1_CFG1) + +#define S_LED0_CFG1 12 +#define M_LED0_CFG1 0x3U +#define V_LED0_CFG1(x) ((x) << S_LED0_CFG1) +#define G_LED0_CFG1(x) (((x) >> S_LED0_CFG1) & M_LED0_CFG1) + +#define S_LED1_TLO 11 +#define V_LED1_TLO(x) ((x) << S_LED1_TLO) +#define F_LED1_TLO V_LED1_TLO(1U) + +#define S_LED1_THI 10 +#define V_LED1_THI(x) ((x) << S_LED1_THI) +#define F_LED1_THI V_LED1_THI(1U) + +#define S_LED0_TLO 9 +#define V_LED0_TLO(x) ((x) << S_LED0_TLO) +#define F_LED0_TLO V_LED0_TLO(1U) + +#define S_LED0_THI 8 
+#define V_LED0_THI(x) ((x) << S_LED0_THI) +#define F_LED0_THI V_LED0_THI(1U) + #define A_MAC_PORT_LED_COUNTHI 0x80c #define A_MAC_PORT_LED_COUNTLO 0x810 #define A_MAC_PORT_CFG3 0x814 +#define S_T5_FPGA_PTP_PORT 26 +#define M_T5_FPGA_PTP_PORT 0x3U +#define V_T5_FPGA_PTP_PORT(x) ((x) << S_T5_FPGA_PTP_PORT) +#define G_T5_FPGA_PTP_PORT(x) (((x) >> S_T5_FPGA_PTP_PORT) & M_T5_FPGA_PTP_PORT) + #define S_FCSDISCTRL 25 #define V_FCSDISCTRL(x) ((x) << S_FCSDISCTRL) #define F_FCSDISCTRL V_FCSDISCTRL(1U) @@ -33766,6 +45396,24 @@ #define V_HSSC16C20SEL(x) ((x) << S_HSSC16C20SEL) #define G_HSSC16C20SEL(x) (((x) >> S_HSSC16C20SEL) & M_HSSC16C20SEL) +#define S_REF_CLK_SEL 30 +#define M_REF_CLK_SEL 0x3U +#define V_REF_CLK_SEL(x) ((x) << S_REF_CLK_SEL) +#define G_REF_CLK_SEL(x) (((x) >> S_REF_CLK_SEL) & M_REF_CLK_SEL) + +#define S_SGMII_SD_SIG_DET 29 +#define V_SGMII_SD_SIG_DET(x) ((x) << S_SGMII_SD_SIG_DET) +#define F_SGMII_SD_SIG_DET V_SGMII_SD_SIG_DET(1U) + +#define S_SGMII_SGPCS_ENA 28 +#define V_SGMII_SGPCS_ENA(x) ((x) << S_SGMII_SGPCS_ENA) +#define F_SGMII_SGPCS_ENA V_SGMII_SGPCS_ENA(1U) + +#define S_MAC_FPGA_PTP_PORT 26 +#define M_MAC_FPGA_PTP_PORT 0x3U +#define V_MAC_FPGA_PTP_PORT(x) ((x) << S_MAC_FPGA_PTP_PORT) +#define G_MAC_FPGA_PTP_PORT(x) (((x) >> S_MAC_FPGA_PTP_PORT) & M_MAC_FPGA_PTP_PORT) + #define A_MAC_PORT_CFG2 0x818 #define S_T5_AEC_PMA_TX_READY 4 @@ -33778,6 +45426,10 @@ #define V_T5_AEC_PMA_RX_READY(x) ((x) << S_T5_AEC_PMA_RX_READY) #define G_T5_AEC_PMA_RX_READY(x) (((x) >> S_T5_AEC_PMA_RX_READY) & M_T5_AEC_PMA_RX_READY) +#define S_AN_DATA_CTL 19 +#define V_AN_DATA_CTL(x) ((x) << S_AN_DATA_CTL) +#define F_AN_DATA_CTL V_AN_DATA_CTL(1U) + #define A_MAC_PORT_PKT_COUNT 0x81c #define A_MAC_PORT_CFG4 0x820 @@ -33823,6 +45475,266 @@ #define A_MAC_PORT_MAGIC_MACID_LO 0x824 #define A_MAC_PORT_MAGIC_MACID_HI 0x828 +#define A_MAC_PORT_MTIP_RESET_CTRL 0x82c + +#define S_AN_RESET_SD_TX_CLK 31 +#define V_AN_RESET_SD_TX_CLK(x) ((x) << S_AN_RESET_SD_TX_CLK) +#define 
F_AN_RESET_SD_TX_CLK V_AN_RESET_SD_TX_CLK(1U) + +#define S_AN_RESET_SD_RX_CLK 30 +#define V_AN_RESET_SD_RX_CLK(x) ((x) << S_AN_RESET_SD_RX_CLK) +#define F_AN_RESET_SD_RX_CLK V_AN_RESET_SD_RX_CLK(1U) + +#define S_SGMII_RESET_TX_CLK 29 +#define V_SGMII_RESET_TX_CLK(x) ((x) << S_SGMII_RESET_TX_CLK) +#define F_SGMII_RESET_TX_CLK V_SGMII_RESET_TX_CLK(1U) + +#define S_SGMII_RESET_RX_CLK 28 +#define V_SGMII_RESET_RX_CLK(x) ((x) << S_SGMII_RESET_RX_CLK) +#define F_SGMII_RESET_RX_CLK V_SGMII_RESET_RX_CLK(1U) + +#define S_SGMII_RESET_REF_CLK 27 +#define V_SGMII_RESET_REF_CLK(x) ((x) << S_SGMII_RESET_REF_CLK) +#define F_SGMII_RESET_REF_CLK V_SGMII_RESET_REF_CLK(1U) + +#define S_PCS10G_RESET_XFI_RXCLK 26 +#define V_PCS10G_RESET_XFI_RXCLK(x) ((x) << S_PCS10G_RESET_XFI_RXCLK) +#define F_PCS10G_RESET_XFI_RXCLK V_PCS10G_RESET_XFI_RXCLK(1U) + +#define S_PCS10G_RESET_XFI_TXCLK 25 +#define V_PCS10G_RESET_XFI_TXCLK(x) ((x) << S_PCS10G_RESET_XFI_TXCLK) +#define F_PCS10G_RESET_XFI_TXCLK V_PCS10G_RESET_XFI_TXCLK(1U) + +#define S_PCS10G_RESET_SD_TX_CLK 24 +#define V_PCS10G_RESET_SD_TX_CLK(x) ((x) << S_PCS10G_RESET_SD_TX_CLK) +#define F_PCS10G_RESET_SD_TX_CLK V_PCS10G_RESET_SD_TX_CLK(1U) + +#define S_PCS10G_RESET_SD_RX_CLK 23 +#define V_PCS10G_RESET_SD_RX_CLK(x) ((x) << S_PCS10G_RESET_SD_RX_CLK) +#define F_PCS10G_RESET_SD_RX_CLK V_PCS10G_RESET_SD_RX_CLK(1U) + +#define S_PCS40G_RESET_RXCLK 22 +#define V_PCS40G_RESET_RXCLK(x) ((x) << S_PCS40G_RESET_RXCLK) +#define F_PCS40G_RESET_RXCLK V_PCS40G_RESET_RXCLK(1U) + +#define S_PCS40G_RESET_SD_TX_CLK 21 +#define V_PCS40G_RESET_SD_TX_CLK(x) ((x) << S_PCS40G_RESET_SD_TX_CLK) +#define F_PCS40G_RESET_SD_TX_CLK V_PCS40G_RESET_SD_TX_CLK(1U) + +#define S_PCS40G_RESET_SD0_RX_CLK 20 +#define V_PCS40G_RESET_SD0_RX_CLK(x) ((x) << S_PCS40G_RESET_SD0_RX_CLK) +#define F_PCS40G_RESET_SD0_RX_CLK V_PCS40G_RESET_SD0_RX_CLK(1U) + +#define S_PCS40G_RESET_SD1_RX_CLK 19 +#define V_PCS40G_RESET_SD1_RX_CLK(x) ((x) << S_PCS40G_RESET_SD1_RX_CLK) +#define 
F_PCS40G_RESET_SD1_RX_CLK V_PCS40G_RESET_SD1_RX_CLK(1U) + +#define S_PCS40G_RESET_SD2_RX_CLK 18 +#define V_PCS40G_RESET_SD2_RX_CLK(x) ((x) << S_PCS40G_RESET_SD2_RX_CLK) +#define F_PCS40G_RESET_SD2_RX_CLK V_PCS40G_RESET_SD2_RX_CLK(1U) + +#define S_PCS40G_RESET_SD3_RX_CLK 17 +#define V_PCS40G_RESET_SD3_RX_CLK(x) ((x) << S_PCS40G_RESET_SD3_RX_CLK) +#define F_PCS40G_RESET_SD3_RX_CLK V_PCS40G_RESET_SD3_RX_CLK(1U) + +#define S_PCS100G_RESET_CGMII_RXCLK 16 +#define V_PCS100G_RESET_CGMII_RXCLK(x) ((x) << S_PCS100G_RESET_CGMII_RXCLK) +#define F_PCS100G_RESET_CGMII_RXCLK V_PCS100G_RESET_CGMII_RXCLK(1U) + +#define S_PCS100G_RESET_CGMII_TXCLK 15 +#define V_PCS100G_RESET_CGMII_TXCLK(x) ((x) << S_PCS100G_RESET_CGMII_TXCLK) +#define F_PCS100G_RESET_CGMII_TXCLK V_PCS100G_RESET_CGMII_TXCLK(1U) + +#define S_PCS100G_RESET_TX_CLK 14 +#define V_PCS100G_RESET_TX_CLK(x) ((x) << S_PCS100G_RESET_TX_CLK) +#define F_PCS100G_RESET_TX_CLK V_PCS100G_RESET_TX_CLK(1U) + +#define S_PCS100G_RESET_SD0_RX_CLK 13 +#define V_PCS100G_RESET_SD0_RX_CLK(x) ((x) << S_PCS100G_RESET_SD0_RX_CLK) +#define F_PCS100G_RESET_SD0_RX_CLK V_PCS100G_RESET_SD0_RX_CLK(1U) + +#define S_PCS100G_RESET_SD1_RX_CLK 12 +#define V_PCS100G_RESET_SD1_RX_CLK(x) ((x) << S_PCS100G_RESET_SD1_RX_CLK) +#define F_PCS100G_RESET_SD1_RX_CLK V_PCS100G_RESET_SD1_RX_CLK(1U) + +#define S_PCS100G_RESET_SD2_RX_CLK 11 +#define V_PCS100G_RESET_SD2_RX_CLK(x) ((x) << S_PCS100G_RESET_SD2_RX_CLK) +#define F_PCS100G_RESET_SD2_RX_CLK V_PCS100G_RESET_SD2_RX_CLK(1U) + +#define S_PCS100G_RESET_SD3_RX_CLK 10 +#define V_PCS100G_RESET_SD3_RX_CLK(x) ((x) << S_PCS100G_RESET_SD3_RX_CLK) +#define F_PCS100G_RESET_SD3_RX_CLK V_PCS100G_RESET_SD3_RX_CLK(1U) + +#define S_MAC40G100G_RESET_TXCLK 9 +#define V_MAC40G100G_RESET_TXCLK(x) ((x) << S_MAC40G100G_RESET_TXCLK) +#define F_MAC40G100G_RESET_TXCLK V_MAC40G100G_RESET_TXCLK(1U) + +#define S_MAC40G100G_RESET_RXCLK 8 +#define V_MAC40G100G_RESET_RXCLK(x) ((x) << S_MAC40G100G_RESET_RXCLK) +#define F_MAC40G100G_RESET_RXCLK 
V_MAC40G100G_RESET_RXCLK(1U) + +#define S_MAC40G100G_RESET_FF_TX_CLK 7 +#define V_MAC40G100G_RESET_FF_TX_CLK(x) ((x) << S_MAC40G100G_RESET_FF_TX_CLK) +#define F_MAC40G100G_RESET_FF_TX_CLK V_MAC40G100G_RESET_FF_TX_CLK(1U) + +#define S_MAC40G100G_RESET_FF_RX_CLK 6 +#define V_MAC40G100G_RESET_FF_RX_CLK(x) ((x) << S_MAC40G100G_RESET_FF_RX_CLK) +#define F_MAC40G100G_RESET_FF_RX_CLK V_MAC40G100G_RESET_FF_RX_CLK(1U) + +#define S_MAC40G100G_RESET_TS_CLK 5 +#define V_MAC40G100G_RESET_TS_CLK(x) ((x) << S_MAC40G100G_RESET_TS_CLK) +#define F_MAC40G100G_RESET_TS_CLK V_MAC40G100G_RESET_TS_CLK(1U) + +#define S_MAC1G10G_RESET_RXCLK 4 +#define V_MAC1G10G_RESET_RXCLK(x) ((x) << S_MAC1G10G_RESET_RXCLK) +#define F_MAC1G10G_RESET_RXCLK V_MAC1G10G_RESET_RXCLK(1U) + +#define S_MAC1G10G_RESET_TXCLK 3 +#define V_MAC1G10G_RESET_TXCLK(x) ((x) << S_MAC1G10G_RESET_TXCLK) +#define F_MAC1G10G_RESET_TXCLK V_MAC1G10G_RESET_TXCLK(1U) + +#define S_MAC1G10G_RESET_FF_RX_CLK 2 +#define V_MAC1G10G_RESET_FF_RX_CLK(x) ((x) << S_MAC1G10G_RESET_FF_RX_CLK) +#define F_MAC1G10G_RESET_FF_RX_CLK V_MAC1G10G_RESET_FF_RX_CLK(1U) + +#define S_MAC1G10G_RESET_FF_TX_CLK 1 +#define V_MAC1G10G_RESET_FF_TX_CLK(x) ((x) << S_MAC1G10G_RESET_FF_TX_CLK) +#define F_MAC1G10G_RESET_FF_TX_CLK V_MAC1G10G_RESET_FF_TX_CLK(1U) + +#define S_XGMII_CLK_RESET 0 +#define V_XGMII_CLK_RESET(x) ((x) << S_XGMII_CLK_RESET) +#define F_XGMII_CLK_RESET V_XGMII_CLK_RESET(1U) + +#define A_MAC_PORT_MTIP_GATE_CTRL 0x830 + +#define S_AN_GATE_SD_TX_CLK 31 +#define V_AN_GATE_SD_TX_CLK(x) ((x) << S_AN_GATE_SD_TX_CLK) +#define F_AN_GATE_SD_TX_CLK V_AN_GATE_SD_TX_CLK(1U) + +#define S_AN_GATE_SD_RX_CLK 30 +#define V_AN_GATE_SD_RX_CLK(x) ((x) << S_AN_GATE_SD_RX_CLK) +#define F_AN_GATE_SD_RX_CLK V_AN_GATE_SD_RX_CLK(1U) + +#define S_SGMII_GATE_TX_CLK 29 +#define V_SGMII_GATE_TX_CLK(x) ((x) << S_SGMII_GATE_TX_CLK) +#define F_SGMII_GATE_TX_CLK V_SGMII_GATE_TX_CLK(1U) + +#define S_SGMII_GATE_RX_CLK 28 +#define V_SGMII_GATE_RX_CLK(x) ((x) << S_SGMII_GATE_RX_CLK) 
+#define F_SGMII_GATE_RX_CLK V_SGMII_GATE_RX_CLK(1U) + +#define S_SGMII_GATE_REF_CLK 27 +#define V_SGMII_GATE_REF_CLK(x) ((x) << S_SGMII_GATE_REF_CLK) +#define F_SGMII_GATE_REF_CLK V_SGMII_GATE_REF_CLK(1U) + +#define S_PCS10G_GATE_XFI_RXCLK 26 +#define V_PCS10G_GATE_XFI_RXCLK(x) ((x) << S_PCS10G_GATE_XFI_RXCLK) +#define F_PCS10G_GATE_XFI_RXCLK V_PCS10G_GATE_XFI_RXCLK(1U) + +#define S_PCS10G_GATE_XFI_TXCLK 25 +#define V_PCS10G_GATE_XFI_TXCLK(x) ((x) << S_PCS10G_GATE_XFI_TXCLK) +#define F_PCS10G_GATE_XFI_TXCLK V_PCS10G_GATE_XFI_TXCLK(1U) + +#define S_PCS10G_GATE_SD_TX_CLK 24 +#define V_PCS10G_GATE_SD_TX_CLK(x) ((x) << S_PCS10G_GATE_SD_TX_CLK) +#define F_PCS10G_GATE_SD_TX_CLK V_PCS10G_GATE_SD_TX_CLK(1U) + +#define S_PCS10G_GATE_SD_RX_CLK 23 +#define V_PCS10G_GATE_SD_RX_CLK(x) ((x) << S_PCS10G_GATE_SD_RX_CLK) +#define F_PCS10G_GATE_SD_RX_CLK V_PCS10G_GATE_SD_RX_CLK(1U) + +#define S_PCS40G_GATE_RXCLK 22 +#define V_PCS40G_GATE_RXCLK(x) ((x) << S_PCS40G_GATE_RXCLK) +#define F_PCS40G_GATE_RXCLK V_PCS40G_GATE_RXCLK(1U) + +#define S_PCS40G_GATE_SD_TX_CLK 21 +#define V_PCS40G_GATE_SD_TX_CLK(x) ((x) << S_PCS40G_GATE_SD_TX_CLK) +#define F_PCS40G_GATE_SD_TX_CLK V_PCS40G_GATE_SD_TX_CLK(1U) + +#define S_PCS40G_GATE_SD_RX_CLK 20 +#define V_PCS40G_GATE_SD_RX_CLK(x) ((x) << S_PCS40G_GATE_SD_RX_CLK) +#define F_PCS40G_GATE_SD_RX_CLK V_PCS40G_GATE_SD_RX_CLK(1U) + +#define S_PCS100G_GATE_CGMII_RXCLK 19 +#define V_PCS100G_GATE_CGMII_RXCLK(x) ((x) << S_PCS100G_GATE_CGMII_RXCLK) +#define F_PCS100G_GATE_CGMII_RXCLK V_PCS100G_GATE_CGMII_RXCLK(1U) + +#define S_PCS100G_GATE_CGMII_TXCLK 18 +#define V_PCS100G_GATE_CGMII_TXCLK(x) ((x) << S_PCS100G_GATE_CGMII_TXCLK) +#define F_PCS100G_GATE_CGMII_TXCLK V_PCS100G_GATE_CGMII_TXCLK(1U) + +#define S_PCS100G_GATE_TX_CLK 17 +#define V_PCS100G_GATE_TX_CLK(x) ((x) << S_PCS100G_GATE_TX_CLK) +#define F_PCS100G_GATE_TX_CLK V_PCS100G_GATE_TX_CLK(1U) + +#define S_PCS100G_GATE_SD_RX_CLK 16 +#define V_PCS100G_GATE_SD_RX_CLK(x) ((x) << S_PCS100G_GATE_SD_RX_CLK) 
+#define F_PCS100G_GATE_SD_RX_CLK V_PCS100G_GATE_SD_RX_CLK(1U) + +#define S_MAC40G100G_GATE_TXCLK 15 +#define V_MAC40G100G_GATE_TXCLK(x) ((x) << S_MAC40G100G_GATE_TXCLK) +#define F_MAC40G100G_GATE_TXCLK V_MAC40G100G_GATE_TXCLK(1U) + +#define S_MAC40G100G_GATE_RXCLK 14 +#define V_MAC40G100G_GATE_RXCLK(x) ((x) << S_MAC40G100G_GATE_RXCLK) +#define F_MAC40G100G_GATE_RXCLK V_MAC40G100G_GATE_RXCLK(1U) + +#define S_MAC40G100G_GATE_FF_TX_CLK 13 +#define V_MAC40G100G_GATE_FF_TX_CLK(x) ((x) << S_MAC40G100G_GATE_FF_TX_CLK) +#define F_MAC40G100G_GATE_FF_TX_CLK V_MAC40G100G_GATE_FF_TX_CLK(1U) + +#define S_MAC40G100G_GATE_FF_RX_CLK 12 +#define V_MAC40G100G_GATE_FF_RX_CLK(x) ((x) << S_MAC40G100G_GATE_FF_RX_CLK) +#define F_MAC40G100G_GATE_FF_RX_CLK V_MAC40G100G_GATE_FF_RX_CLK(1U) + +#define S_MAC40G100G_TS_CLK 11 +#define V_MAC40G100G_TS_CLK(x) ((x) << S_MAC40G100G_TS_CLK) +#define F_MAC40G100G_TS_CLK V_MAC40G100G_TS_CLK(1U) + +#define S_MAC1G10G_GATE_RXCLK 10 +#define V_MAC1G10G_GATE_RXCLK(x) ((x) << S_MAC1G10G_GATE_RXCLK) +#define F_MAC1G10G_GATE_RXCLK V_MAC1G10G_GATE_RXCLK(1U) + +#define S_MAC1G10G_GATE_TXCLK 9 +#define V_MAC1G10G_GATE_TXCLK(x) ((x) << S_MAC1G10G_GATE_TXCLK) +#define F_MAC1G10G_GATE_TXCLK V_MAC1G10G_GATE_TXCLK(1U) + +#define S_MAC1G10G_GATE_FF_RX_CLK 8 +#define V_MAC1G10G_GATE_FF_RX_CLK(x) ((x) << S_MAC1G10G_GATE_FF_RX_CLK) +#define F_MAC1G10G_GATE_FF_RX_CLK V_MAC1G10G_GATE_FF_RX_CLK(1U) + +#define S_MAC1G10G_GATE_FF_TX_CLK 7 +#define V_MAC1G10G_GATE_FF_TX_CLK(x) ((x) << S_MAC1G10G_GATE_FF_TX_CLK) +#define F_MAC1G10G_GATE_FF_TX_CLK V_MAC1G10G_GATE_FF_TX_CLK(1U) + +#define S_AEC_RX 6 +#define V_AEC_RX(x) ((x) << S_AEC_RX) +#define F_AEC_RX V_AEC_RX(1U) + +#define S_AEC_TX 5 +#define V_AEC_TX(x) ((x) << S_AEC_TX) +#define F_AEC_TX V_AEC_TX(1U) + +#define S_PCS100G_CLK_ENABLE 4 +#define V_PCS100G_CLK_ENABLE(x) ((x) << S_PCS100G_CLK_ENABLE) +#define F_PCS100G_CLK_ENABLE V_PCS100G_CLK_ENABLE(1U) + +#define S_PCS40G_CLK_ENABLE 3 +#define V_PCS40G_CLK_ENABLE(x) ((x) 
<< S_PCS40G_CLK_ENABLE) +#define F_PCS40G_CLK_ENABLE V_PCS40G_CLK_ENABLE(1U) + +#define S_PCS10G_CLK_ENABLE 2 +#define V_PCS10G_CLK_ENABLE(x) ((x) << S_PCS10G_CLK_ENABLE) +#define F_PCS10G_CLK_ENABLE V_PCS10G_CLK_ENABLE(1U) + +#define S_PCS1G_CLK_ENABLE 1 +#define V_PCS1G_CLK_ENABLE(x) ((x) << S_PCS1G_CLK_ENABLE) +#define F_PCS1G_CLK_ENABLE V_PCS1G_CLK_ENABLE(1U) + +#define S_AN_CLK_ENABLE 0 +#define V_AN_CLK_ENABLE(x) ((x) << S_AN_CLK_ENABLE) +#define F_AN_CLK_ENABLE V_AN_CLK_ENABLE(1U) + #define A_MAC_PORT_LINK_STATUS 0x834 #define S_AN_DONE 6 @@ -33837,6 +45749,606 @@ #define V_BLOCK_LOCK(x) ((x) << S_BLOCK_LOCK) #define F_BLOCK_LOCK V_BLOCK_LOCK(1U) +#define S_HI_BER_ST 7 +#define V_HI_BER_ST(x) ((x) << S_HI_BER_ST) +#define F_HI_BER_ST V_HI_BER_ST(1U) + +#define S_AN_DONE_ST 6 +#define V_AN_DONE_ST(x) ((x) << S_AN_DONE_ST) +#define F_AN_DONE_ST V_AN_DONE_ST(1U) + +#define A_MAC_PORT_AEC_ADD_CTL_STAT_0 0x838 + +#define S_AEC_SYS_LANE_TYPE_3 11 +#define V_AEC_SYS_LANE_TYPE_3(x) ((x) << S_AEC_SYS_LANE_TYPE_3) +#define F_AEC_SYS_LANE_TYPE_3 V_AEC_SYS_LANE_TYPE_3(1U) + +#define S_AEC_SYS_LANE_TYPE_2 10 +#define V_AEC_SYS_LANE_TYPE_2(x) ((x) << S_AEC_SYS_LANE_TYPE_2) +#define F_AEC_SYS_LANE_TYPE_2 V_AEC_SYS_LANE_TYPE_2(1U) + +#define S_AEC_SYS_LANE_TYPE_1 9 +#define V_AEC_SYS_LANE_TYPE_1(x) ((x) << S_AEC_SYS_LANE_TYPE_1) +#define F_AEC_SYS_LANE_TYPE_1 V_AEC_SYS_LANE_TYPE_1(1U) + +#define S_AEC_SYS_LANE_TYPE_0 8 +#define V_AEC_SYS_LANE_TYPE_0(x) ((x) << S_AEC_SYS_LANE_TYPE_0) +#define F_AEC_SYS_LANE_TYPE_0 V_AEC_SYS_LANE_TYPE_0(1U) + +#define S_AEC_SYS_LANE_SELECT_3 6 +#define M_AEC_SYS_LANE_SELECT_3 0x3U +#define V_AEC_SYS_LANE_SELECT_3(x) ((x) << S_AEC_SYS_LANE_SELECT_3) +#define G_AEC_SYS_LANE_SELECT_3(x) (((x) >> S_AEC_SYS_LANE_SELECT_3) & M_AEC_SYS_LANE_SELECT_3) + +#define S_AEC_SYS_LANE_SELECT_2 4 +#define M_AEC_SYS_LANE_SELECT_2 0x3U +#define V_AEC_SYS_LANE_SELECT_2(x) ((x) << S_AEC_SYS_LANE_SELECT_2) +#define G_AEC_SYS_LANE_SELECT_2(x) (((x) >> 
S_AEC_SYS_LANE_SELECT_2) & M_AEC_SYS_LANE_SELECT_2) + +#define S_AEC_SYS_LANE_SELECT_1 2 +#define M_AEC_SYS_LANE_SELECT_1 0x3U +#define V_AEC_SYS_LANE_SELECT_1(x) ((x) << S_AEC_SYS_LANE_SELECT_1) +#define G_AEC_SYS_LANE_SELECT_1(x) (((x) >> S_AEC_SYS_LANE_SELECT_1) & M_AEC_SYS_LANE_SELECT_1) + +#define S_AEC_SYS_LANE_SELECT_O 0 +#define M_AEC_SYS_LANE_SELECT_O 0x3U +#define V_AEC_SYS_LANE_SELECT_O(x) ((x) << S_AEC_SYS_LANE_SELECT_O) +#define G_AEC_SYS_LANE_SELECT_O(x) (((x) >> S_AEC_SYS_LANE_SELECT_O) & M_AEC_SYS_LANE_SELECT_O) + +#define A_MAC_PORT_AEC_ADD_CTL_STAT_1 0x83c + +#define S_AEC_RX_UNKNOWN_LANE_3 11 +#define V_AEC_RX_UNKNOWN_LANE_3(x) ((x) << S_AEC_RX_UNKNOWN_LANE_3) +#define F_AEC_RX_UNKNOWN_LANE_3 V_AEC_RX_UNKNOWN_LANE_3(1U) + +#define S_AEC_RX_UNKNOWN_LANE_2 10 +#define V_AEC_RX_UNKNOWN_LANE_2(x) ((x) << S_AEC_RX_UNKNOWN_LANE_2) +#define F_AEC_RX_UNKNOWN_LANE_2 V_AEC_RX_UNKNOWN_LANE_2(1U) + +#define S_AEC_RX_UNKNOWN_LANE_1 9 +#define V_AEC_RX_UNKNOWN_LANE_1(x) ((x) << S_AEC_RX_UNKNOWN_LANE_1) +#define F_AEC_RX_UNKNOWN_LANE_1 V_AEC_RX_UNKNOWN_LANE_1(1U) + +#define S_AEC_RX_UNKNOWN_LANE_0 8 +#define V_AEC_RX_UNKNOWN_LANE_0(x) ((x) << S_AEC_RX_UNKNOWN_LANE_0) +#define F_AEC_RX_UNKNOWN_LANE_0 V_AEC_RX_UNKNOWN_LANE_0(1U) + +#define S_AEC_RX_LANE_ID_3 6 +#define M_AEC_RX_LANE_ID_3 0x3U +#define V_AEC_RX_LANE_ID_3(x) ((x) << S_AEC_RX_LANE_ID_3) +#define G_AEC_RX_LANE_ID_3(x) (((x) >> S_AEC_RX_LANE_ID_3) & M_AEC_RX_LANE_ID_3) + +#define S_AEC_RX_LANE_ID_2 4 +#define M_AEC_RX_LANE_ID_2 0x3U +#define V_AEC_RX_LANE_ID_2(x) ((x) << S_AEC_RX_LANE_ID_2) +#define G_AEC_RX_LANE_ID_2(x) (((x) >> S_AEC_RX_LANE_ID_2) & M_AEC_RX_LANE_ID_2) + +#define S_AEC_RX_LANE_ID_1 2 +#define M_AEC_RX_LANE_ID_1 0x3U +#define V_AEC_RX_LANE_ID_1(x) ((x) << S_AEC_RX_LANE_ID_1) +#define G_AEC_RX_LANE_ID_1(x) (((x) >> S_AEC_RX_LANE_ID_1) & M_AEC_RX_LANE_ID_1) + +#define S_AEC_RX_LANE_ID_O 0 +#define M_AEC_RX_LANE_ID_O 0x3U +#define V_AEC_RX_LANE_ID_O(x) ((x) << S_AEC_RX_LANE_ID_O) 
+#define G_AEC_RX_LANE_ID_O(x) (((x) >> S_AEC_RX_LANE_ID_O) & M_AEC_RX_LANE_ID_O) + +#define A_MAC_PORT_AEC_XGMII_TIMER_LO_40G 0x840 + +#define S_XGMII_CLK_IN_1MS_LO_40G 0 +#define M_XGMII_CLK_IN_1MS_LO_40G 0xffffU +#define V_XGMII_CLK_IN_1MS_LO_40G(x) ((x) << S_XGMII_CLK_IN_1MS_LO_40G) +#define G_XGMII_CLK_IN_1MS_LO_40G(x) (((x) >> S_XGMII_CLK_IN_1MS_LO_40G) & M_XGMII_CLK_IN_1MS_LO_40G) + +#define A_MAC_PORT_AEC_XGMII_TIMER_HI_40G 0x844 + +#define S_XGMII_CLK_IN_1MS_HI_40G 0 +#define M_XGMII_CLK_IN_1MS_HI_40G 0xfU +#define V_XGMII_CLK_IN_1MS_HI_40G(x) ((x) << S_XGMII_CLK_IN_1MS_HI_40G) +#define G_XGMII_CLK_IN_1MS_HI_40G(x) (((x) >> S_XGMII_CLK_IN_1MS_HI_40G) & M_XGMII_CLK_IN_1MS_HI_40G) + +#define A_MAC_PORT_AEC_XGMII_TIMER_LO_100G 0x848 + +#define S_XGMII_CLK_IN_1MS_LO_100G 0 +#define M_XGMII_CLK_IN_1MS_LO_100G 0xffffU +#define V_XGMII_CLK_IN_1MS_LO_100G(x) ((x) << S_XGMII_CLK_IN_1MS_LO_100G) +#define G_XGMII_CLK_IN_1MS_LO_100G(x) (((x) >> S_XGMII_CLK_IN_1MS_LO_100G) & M_XGMII_CLK_IN_1MS_LO_100G) + +#define A_MAC_PORT_AEC_XGMII_TIMER_HI_100G 0x84c + +#define S_XGMII_CLK_IN_1MS_HI_100G 0 +#define M_XGMII_CLK_IN_1MS_HI_100G 0xfU +#define V_XGMII_CLK_IN_1MS_HI_100G(x) ((x) << S_XGMII_CLK_IN_1MS_HI_100G) +#define G_XGMII_CLK_IN_1MS_HI_100G(x) (((x) >> S_XGMII_CLK_IN_1MS_HI_100G) & M_XGMII_CLK_IN_1MS_HI_100G) + +#define A_MAC_PORT_AEC_DEBUG_LO_0 0x850 + +#define S_CTL_FSM_CUR_STATE 28 +#define M_CTL_FSM_CUR_STATE 0x7U +#define V_CTL_FSM_CUR_STATE(x) ((x) << S_CTL_FSM_CUR_STATE) +#define G_CTL_FSM_CUR_STATE(x) (((x) >> S_CTL_FSM_CUR_STATE) & M_CTL_FSM_CUR_STATE) + +#define S_CIN_FSM_CUR_STATE 26 +#define M_CIN_FSM_CUR_STATE 0x3U +#define V_CIN_FSM_CUR_STATE(x) ((x) << S_CIN_FSM_CUR_STATE) +#define G_CIN_FSM_CUR_STATE(x) (((x) >> S_CIN_FSM_CUR_STATE) & M_CIN_FSM_CUR_STATE) + +#define S_CRI_FSM_CUR_STATE 23 +#define M_CRI_FSM_CUR_STATE 0x7U +#define V_CRI_FSM_CUR_STATE(x) ((x) << S_CRI_FSM_CUR_STATE) +#define G_CRI_FSM_CUR_STATE(x) (((x) >> S_CRI_FSM_CUR_STATE) & 
M_CRI_FSM_CUR_STATE) + +#define S_CU_C3_ACK_VALUE 21 +#define M_CU_C3_ACK_VALUE 0x3U +#define V_CU_C3_ACK_VALUE(x) ((x) << S_CU_C3_ACK_VALUE) +#define G_CU_C3_ACK_VALUE(x) (((x) >> S_CU_C3_ACK_VALUE) & M_CU_C3_ACK_VALUE) + +#define S_CU_C2_ACK_VALUE 19 +#define M_CU_C2_ACK_VALUE 0x3U +#define V_CU_C2_ACK_VALUE(x) ((x) << S_CU_C2_ACK_VALUE) +#define G_CU_C2_ACK_VALUE(x) (((x) >> S_CU_C2_ACK_VALUE) & M_CU_C2_ACK_VALUE) + +#define S_CU_C1_ACK_VALUE 17 +#define M_CU_C1_ACK_VALUE 0x3U +#define V_CU_C1_ACK_VALUE(x) ((x) << S_CU_C1_ACK_VALUE) +#define G_CU_C1_ACK_VALUE(x) (((x) >> S_CU_C1_ACK_VALUE) & M_CU_C1_ACK_VALUE) + +#define S_CU_C0_ACK_VALUE 15 +#define M_CU_C0_ACK_VALUE 0x3U +#define V_CU_C0_ACK_VALUE(x) ((x) << S_CU_C0_ACK_VALUE) +#define G_CU_C0_ACK_VALUE(x) (((x) >> S_CU_C0_ACK_VALUE) & M_CU_C0_ACK_VALUE) + +#define S_CX_INIT 13 +#define V_CX_INIT(x) ((x) << S_CX_INIT) +#define F_CX_INIT V_CX_INIT(1U) + +#define S_CX_PRESET 12 +#define V_CX_PRESET(x) ((x) << S_CX_PRESET) +#define F_CX_PRESET V_CX_PRESET(1U) + +#define S_CUF_C3_UPDATE 9 +#define M_CUF_C3_UPDATE 0x3U +#define V_CUF_C3_UPDATE(x) ((x) << S_CUF_C3_UPDATE) +#define G_CUF_C3_UPDATE(x) (((x) >> S_CUF_C3_UPDATE) & M_CUF_C3_UPDATE) + +#define S_CUF_C2_UPDATE 7 +#define M_CUF_C2_UPDATE 0x3U +#define V_CUF_C2_UPDATE(x) ((x) << S_CUF_C2_UPDATE) +#define G_CUF_C2_UPDATE(x) (((x) >> S_CUF_C2_UPDATE) & M_CUF_C2_UPDATE) + +#define S_CUF_C1_UPDATE 5 +#define M_CUF_C1_UPDATE 0x3U +#define V_CUF_C1_UPDATE(x) ((x) << S_CUF_C1_UPDATE) +#define G_CUF_C1_UPDATE(x) (((x) >> S_CUF_C1_UPDATE) & M_CUF_C1_UPDATE) + +#define S_CUF_C0_UPDATE 3 +#define M_CUF_C0_UPDATE 0x3U +#define V_CUF_C0_UPDATE(x) ((x) << S_CUF_C0_UPDATE) +#define G_CUF_C0_UPDATE(x) (((x) >> S_CUF_C0_UPDATE) & M_CUF_C0_UPDATE) + +#define S_REG_FPH_ATTR_TXUPDAT_VALID 2 +#define V_REG_FPH_ATTR_TXUPDAT_VALID(x) ((x) << S_REG_FPH_ATTR_TXUPDAT_VALID) +#define F_REG_FPH_ATTR_TXUPDAT_VALID V_REG_FPH_ATTR_TXUPDAT_VALID(1U) + +#define S_REG_FPH_ATTR_TXSTAT_VALID 1 
+#define V_REG_FPH_ATTR_TXSTAT_VALID(x) ((x) << S_REG_FPH_ATTR_TXSTAT_VALID) +#define F_REG_FPH_ATTR_TXSTAT_VALID V_REG_FPH_ATTR_TXSTAT_VALID(1U) + +#define S_REG_MAN_DEC_REQ 0 +#define V_REG_MAN_DEC_REQ(x) ((x) << S_REG_MAN_DEC_REQ) +#define F_REG_MAN_DEC_REQ V_REG_MAN_DEC_REQ(1U) + +#define A_MAC_PORT_AEC_DEBUG_HI_0 0x854 + +#define S_FC_LSNA_ 12 +#define V_FC_LSNA_(x) ((x) << S_FC_LSNA_) +#define F_FC_LSNA_ V_FC_LSNA_(1U) + +#define S_CUF_C0_FSM_DEBUG 9 +#define M_CUF_C0_FSM_DEBUG 0x7U +#define V_CUF_C0_FSM_DEBUG(x) ((x) << S_CUF_C0_FSM_DEBUG) +#define G_CUF_C0_FSM_DEBUG(x) (((x) >> S_CUF_C0_FSM_DEBUG) & M_CUF_C0_FSM_DEBUG) + +#define S_CUF_C1_FSM_DEBUG 6 +#define M_CUF_C1_FSM_DEBUG 0x7U +#define V_CUF_C1_FSM_DEBUG(x) ((x) << S_CUF_C1_FSM_DEBUG) +#define G_CUF_C1_FSM_DEBUG(x) (((x) >> S_CUF_C1_FSM_DEBUG) & M_CUF_C1_FSM_DEBUG) + +#define S_CUF_C2_FSM_DEBUG 3 +#define M_CUF_C2_FSM_DEBUG 0x7U +#define V_CUF_C2_FSM_DEBUG(x) ((x) << S_CUF_C2_FSM_DEBUG) +#define G_CUF_C2_FSM_DEBUG(x) (((x) >> S_CUF_C2_FSM_DEBUG) & M_CUF_C2_FSM_DEBUG) + +#define S_LCK_FSM_CUR_STATE 0 +#define M_LCK_FSM_CUR_STATE 0x7U +#define V_LCK_FSM_CUR_STATE(x) ((x) << S_LCK_FSM_CUR_STATE) +#define G_LCK_FSM_CUR_STATE(x) (((x) >> S_LCK_FSM_CUR_STATE) & M_LCK_FSM_CUR_STATE) + +#define A_MAC_PORT_AEC_DEBUG_LO_1 0x858 +#define A_MAC_PORT_AEC_DEBUG_HI_1 0x85c +#define A_MAC_PORT_AEC_DEBUG_LO_2 0x860 +#define A_MAC_PORT_AEC_DEBUG_HI_2 0x864 +#define A_MAC_PORT_AEC_DEBUG_LO_3 0x868 +#define A_MAC_PORT_AEC_DEBUG_HI_3 0x86c +#define A_MAC_PORT_MAC_DEBUG_RO 0x870 + +#define S_MAC40G100G_TX_UNDERFLOW 13 +#define V_MAC40G100G_TX_UNDERFLOW(x) ((x) << S_MAC40G100G_TX_UNDERFLOW) +#define F_MAC40G100G_TX_UNDERFLOW V_MAC40G100G_TX_UNDERFLOW(1U) + +#define S_MAC1G10G_MAGIC_IND 12 +#define V_MAC1G10G_MAGIC_IND(x) ((x) << S_MAC1G10G_MAGIC_IND) +#define F_MAC1G10G_MAGIC_IND V_MAC1G10G_MAGIC_IND(1U) + +#define S_MAC1G10G_FF_RX_EMPTY 11 +#define V_MAC1G10G_FF_RX_EMPTY(x) ((x) << S_MAC1G10G_FF_RX_EMPTY) +#define 
F_MAC1G10G_FF_RX_EMPTY V_MAC1G10G_FF_RX_EMPTY(1U) + +#define S_MAC1G10G_FF_TX_OVR_ERR 10 +#define V_MAC1G10G_FF_TX_OVR_ERR(x) ((x) << S_MAC1G10G_FF_TX_OVR_ERR) +#define F_MAC1G10G_FF_TX_OVR_ERR V_MAC1G10G_FF_TX_OVR_ERR(1U) + +#define S_MAC1G10G_IF_MODE_ENA 8 +#define M_MAC1G10G_IF_MODE_ENA 0x3U +#define V_MAC1G10G_IF_MODE_ENA(x) ((x) << S_MAC1G10G_IF_MODE_ENA) +#define G_MAC1G10G_IF_MODE_ENA(x) (((x) >> S_MAC1G10G_IF_MODE_ENA) & M_MAC1G10G_IF_MODE_ENA) + +#define S_MAC1G10G_MII_ENA_10 7 +#define V_MAC1G10G_MII_ENA_10(x) ((x) << S_MAC1G10G_MII_ENA_10) +#define F_MAC1G10G_MII_ENA_10 V_MAC1G10G_MII_ENA_10(1U) + +#define S_MAC1G10G_PAUSE_ON 6 +#define V_MAC1G10G_PAUSE_ON(x) ((x) << S_MAC1G10G_PAUSE_ON) +#define F_MAC1G10G_PAUSE_ON V_MAC1G10G_PAUSE_ON(1U) + +#define S_MAC1G10G_PFC_MODE 5 +#define V_MAC1G10G_PFC_MODE(x) ((x) << S_MAC1G10G_PFC_MODE) +#define F_MAC1G10G_PFC_MODE V_MAC1G10G_PFC_MODE(1U) + +#define S_MAC1G10G_RX_SFD_O 4 +#define V_MAC1G10G_RX_SFD_O(x) ((x) << S_MAC1G10G_RX_SFD_O) +#define F_MAC1G10G_RX_SFD_O V_MAC1G10G_RX_SFD_O(1U) + +#define S_MAC1G10G_TX_EMPTY 3 +#define V_MAC1G10G_TX_EMPTY(x) ((x) << S_MAC1G10G_TX_EMPTY) +#define F_MAC1G10G_TX_EMPTY V_MAC1G10G_TX_EMPTY(1U) + +#define S_MAC1G10G_TX_SFD_O 2 +#define V_MAC1G10G_TX_SFD_O(x) ((x) << S_MAC1G10G_TX_SFD_O) +#define F_MAC1G10G_TX_SFD_O V_MAC1G10G_TX_SFD_O(1U) + +#define S_MAC1G10G_TX_TS_FRM_OUT 1 +#define V_MAC1G10G_TX_TS_FRM_OUT(x) ((x) << S_MAC1G10G_TX_TS_FRM_OUT) +#define F_MAC1G10G_TX_TS_FRM_OUT V_MAC1G10G_TX_TS_FRM_OUT(1U) + +#define S_MAC1G10G_TX_UNDERFLOW 0 +#define V_MAC1G10G_TX_UNDERFLOW(x) ((x) << S_MAC1G10G_TX_UNDERFLOW) +#define F_MAC1G10G_TX_UNDERFLOW V_MAC1G10G_TX_UNDERFLOW(1U) + +#define A_MAC_PORT_MAC_CTRL_RW 0x874 + +#define S_MAC40G100G_FF_TX_PFC_XOFF 17 +#define M_MAC40G100G_FF_TX_PFC_XOFF 0xffU +#define V_MAC40G100G_FF_TX_PFC_XOFF(x) ((x) << S_MAC40G100G_FF_TX_PFC_XOFF) +#define G_MAC40G100G_FF_TX_PFC_XOFF(x) (((x) >> S_MAC40G100G_FF_TX_PFC_XOFF) & M_MAC40G100G_FF_TX_PFC_XOFF) 
+ +#define S_MAC40G100G_TX_LOC_FAULT 16 +#define V_MAC40G100G_TX_LOC_FAULT(x) ((x) << S_MAC40G100G_TX_LOC_FAULT) +#define F_MAC40G100G_TX_LOC_FAULT V_MAC40G100G_TX_LOC_FAULT(1U) + +#define S_MAC40G100G_TX_REM_FAULT 15 +#define V_MAC40G100G_TX_REM_FAULT(x) ((x) << S_MAC40G100G_TX_REM_FAULT) +#define F_MAC40G100G_TX_REM_FAULT V_MAC40G100G_TX_REM_FAULT(1U) + +#define S_MAC40G_LOOP_BCK 14 +#define V_MAC40G_LOOP_BCK(x) ((x) << S_MAC40G_LOOP_BCK) +#define F_MAC40G_LOOP_BCK V_MAC40G_LOOP_BCK(1U) + +#define S_MAC1G10G_MAGIC_ENA 13 +#define V_MAC1G10G_MAGIC_ENA(x) ((x) << S_MAC1G10G_MAGIC_ENA) +#define F_MAC1G10G_MAGIC_ENA V_MAC1G10G_MAGIC_ENA(1U) + +#define S_MAC1G10G_IF_MODE_SET 11 +#define M_MAC1G10G_IF_MODE_SET 0x3U +#define V_MAC1G10G_IF_MODE_SET(x) ((x) << S_MAC1G10G_IF_MODE_SET) +#define G_MAC1G10G_IF_MODE_SET(x) (((x) >> S_MAC1G10G_IF_MODE_SET) & M_MAC1G10G_IF_MODE_SET) + +#define S_MAC1G10G_TX_LOC_FAULT 10 +#define V_MAC1G10G_TX_LOC_FAULT(x) ((x) << S_MAC1G10G_TX_LOC_FAULT) +#define F_MAC1G10G_TX_LOC_FAULT V_MAC1G10G_TX_LOC_FAULT(1U) + +#define S_MAC1G10G_TX_REM_FAULT 9 +#define V_MAC1G10G_TX_REM_FAULT(x) ((x) << S_MAC1G10G_TX_REM_FAULT) +#define F_MAC1G10G_TX_REM_FAULT V_MAC1G10G_TX_REM_FAULT(1U) + +#define S_MAC1G10G_XOFF_GEN 1 +#define M_MAC1G10G_XOFF_GEN 0xffU +#define V_MAC1G10G_XOFF_GEN(x) ((x) << S_MAC1G10G_XOFF_GEN) +#define G_MAC1G10G_XOFF_GEN(x) (((x) >> S_MAC1G10G_XOFF_GEN) & M_MAC1G10G_XOFF_GEN) + +#define S_MAC1G_LOOP_BCK 0 +#define V_MAC1G_LOOP_BCK(x) ((x) << S_MAC1G_LOOP_BCK) +#define F_MAC1G_LOOP_BCK V_MAC1G_LOOP_BCK(1U) + +#define A_MAC_PORT_PCS_DEBUG0_RO 0x878 + +#define S_FPGA_LOCK 26 +#define M_FPGA_LOCK 0xfU +#define V_FPGA_LOCK(x) ((x) << S_FPGA_LOCK) +#define G_FPGA_LOCK(x) (((x) >> S_FPGA_LOCK) & M_FPGA_LOCK) + +#define S_T6_AN_DONE 25 +#define V_T6_AN_DONE(x) ((x) << S_T6_AN_DONE) +#define F_T6_AN_DONE V_T6_AN_DONE(1U) + +#define S_AN_INT 24 +#define V_AN_INT(x) ((x) << S_AN_INT) +#define F_AN_INT V_AN_INT(1U) + +#define S_AN_PCS_RX_CLK_ENA 
23 +#define V_AN_PCS_RX_CLK_ENA(x) ((x) << S_AN_PCS_RX_CLK_ENA) +#define F_AN_PCS_RX_CLK_ENA V_AN_PCS_RX_CLK_ENA(1U) + +#define S_AN_PCS_TX_CLK_ENA 22 +#define V_AN_PCS_TX_CLK_ENA(x) ((x) << S_AN_PCS_TX_CLK_ENA) +#define F_AN_PCS_TX_CLK_ENA V_AN_PCS_TX_CLK_ENA(1U) + +#define S_AN_SELECT 17 +#define M_AN_SELECT 0x1fU +#define V_AN_SELECT(x) ((x) << S_AN_SELECT) +#define G_AN_SELECT(x) (((x) >> S_AN_SELECT) & M_AN_SELECT) + +#define S_AN_PROG 16 +#define V_AN_PROG(x) ((x) << S_AN_PROG) +#define F_AN_PROG V_AN_PROG(1U) + +#define S_PCS40G_BLOCK_LOCK 12 +#define M_PCS40G_BLOCK_LOCK 0xfU +#define V_PCS40G_BLOCK_LOCK(x) ((x) << S_PCS40G_BLOCK_LOCK) +#define G_PCS40G_BLOCK_LOCK(x) (((x) >> S_PCS40G_BLOCK_LOCK) & M_PCS40G_BLOCK_LOCK) + +#define S_PCS40G_BER_TIMER_DONE 11 +#define V_PCS40G_BER_TIMER_DONE(x) ((x) << S_PCS40G_BER_TIMER_DONE) +#define F_PCS40G_BER_TIMER_DONE V_PCS40G_BER_TIMER_DONE(1U) + +#define S_PCS10G_FEC_LOCKED 10 +#define V_PCS10G_FEC_LOCKED(x) ((x) << S_PCS10G_FEC_LOCKED) +#define F_PCS10G_FEC_LOCKED V_PCS10G_FEC_LOCKED(1U) + +#define S_PCS10G_BLOCK_LOCK 9 +#define V_PCS10G_BLOCK_LOCK(x) ((x) << S_PCS10G_BLOCK_LOCK) +#define F_PCS10G_BLOCK_LOCK V_PCS10G_BLOCK_LOCK(1U) + +#define S_SGMII_GMII_COL 8 +#define V_SGMII_GMII_COL(x) ((x) << S_SGMII_GMII_COL) +#define F_SGMII_GMII_COL V_SGMII_GMII_COL(1U) + +#define S_SGMII_GMII_CRS 7 +#define V_SGMII_GMII_CRS(x) ((x) << S_SGMII_GMII_CRS) +#define F_SGMII_GMII_CRS V_SGMII_GMII_CRS(1U) + +#define S_SGMII_SD_LOOPBACK 6 +#define V_SGMII_SD_LOOPBACK(x) ((x) << S_SGMII_SD_LOOPBACK) +#define F_SGMII_SD_LOOPBACK V_SGMII_SD_LOOPBACK(1U) + +#define S_SGMII_SG_AN_DONE 5 +#define V_SGMII_SG_AN_DONE(x) ((x) << S_SGMII_SG_AN_DONE) +#define F_SGMII_SG_AN_DONE V_SGMII_SG_AN_DONE(1U) + +#define S_SGMII_SG_HD 4 +#define V_SGMII_SG_HD(x) ((x) << S_SGMII_SG_HD) +#define F_SGMII_SG_HD V_SGMII_SG_HD(1U) + +#define S_SGMII_SG_PAGE_RX 3 +#define V_SGMII_SG_PAGE_RX(x) ((x) << S_SGMII_SG_PAGE_RX) +#define F_SGMII_SG_PAGE_RX 
V_SGMII_SG_PAGE_RX(1U) + +#define S_SGMII_SG_RX_SYNC 2 +#define V_SGMII_SG_RX_SYNC(x) ((x) << S_SGMII_SG_RX_SYNC) +#define F_SGMII_SG_RX_SYNC V_SGMII_SG_RX_SYNC(1U) + +#define S_SGMII_SG_SPEED 0 +#define M_SGMII_SG_SPEED 0x3U +#define V_SGMII_SG_SPEED(x) ((x) << S_SGMII_SG_SPEED) +#define G_SGMII_SG_SPEED(x) (((x) >> S_SGMII_SG_SPEED) & M_SGMII_SG_SPEED) + +#define A_MAC_PORT_PCS_CTRL_RW 0x87c + +#define S_TX_LI_FAULT 31 +#define V_TX_LI_FAULT(x) ((x) << S_TX_LI_FAULT) +#define F_TX_LI_FAULT V_TX_LI_FAULT(1U) + +#define S_T6_PAD 30 +#define V_T6_PAD(x) ((x) << S_T6_PAD) +#define F_T6_PAD V_T6_PAD(1U) + +#define S_BLK_STB_VAL 22 +#define M_BLK_STB_VAL 0xffU +#define V_BLK_STB_VAL(x) ((x) << S_BLK_STB_VAL) +#define G_BLK_STB_VAL(x) (((x) >> S_BLK_STB_VAL) & M_BLK_STB_VAL) + +#define S_DEBUG_SEL 18 +#define M_DEBUG_SEL 0xfU +#define V_DEBUG_SEL(x) ((x) << S_DEBUG_SEL) +#define G_DEBUG_SEL(x) (((x) >> S_DEBUG_SEL) & M_DEBUG_SEL) + +#define S_SGMII_LOOP 15 +#define M_SGMII_LOOP 0x7U +#define V_SGMII_LOOP(x) ((x) << S_SGMII_LOOP) +#define G_SGMII_LOOP(x) (((x) >> S_SGMII_LOOP) & M_SGMII_LOOP) + +#define S_AN_DIS_TIMER 14 +#define V_AN_DIS_TIMER(x) ((x) << S_AN_DIS_TIMER) +#define F_AN_DIS_TIMER V_AN_DIS_TIMER(1U) + +#define S_PCS100G_BER_TIMER_SHORT 13 +#define V_PCS100G_BER_TIMER_SHORT(x) ((x) << S_PCS100G_BER_TIMER_SHORT) +#define F_PCS100G_BER_TIMER_SHORT V_PCS100G_BER_TIMER_SHORT(1U) + +#define S_PCS100G_TX_LANE_THRESH 9 +#define M_PCS100G_TX_LANE_THRESH 0xfU +#define V_PCS100G_TX_LANE_THRESH(x) ((x) << S_PCS100G_TX_LANE_THRESH) +#define G_PCS100G_TX_LANE_THRESH(x) (((x) >> S_PCS100G_TX_LANE_THRESH) & M_PCS100G_TX_LANE_THRESH) + +#define S_PCS100G_VL_INTVL 8 +#define V_PCS100G_VL_INTVL(x) ((x) << S_PCS100G_VL_INTVL) +#define F_PCS100G_VL_INTVL V_PCS100G_VL_INTVL(1U) + +#define S_SGMII_TX_LANE_CKMULT 4 +#define M_SGMII_TX_LANE_CKMULT 0x7U +#define V_SGMII_TX_LANE_CKMULT(x) ((x) << S_SGMII_TX_LANE_CKMULT) +#define G_SGMII_TX_LANE_CKMULT(x) (((x) >> 
S_SGMII_TX_LANE_CKMULT) & M_SGMII_TX_LANE_CKMULT) + +#define S_SGMII_TX_LANE_THRESH 0 +#define M_SGMII_TX_LANE_THRESH 0xfU +#define V_SGMII_TX_LANE_THRESH(x) ((x) << S_SGMII_TX_LANE_THRESH) +#define G_SGMII_TX_LANE_THRESH(x) (((x) >> S_SGMII_TX_LANE_THRESH) & M_SGMII_TX_LANE_THRESH) + +#define A_MAC_PORT_PCS_DEBUG1_RO 0x880 + +#define S_PCS100G_ALIGN_LOCK 21 +#define V_PCS100G_ALIGN_LOCK(x) ((x) << S_PCS100G_ALIGN_LOCK) +#define F_PCS100G_ALIGN_LOCK V_PCS100G_ALIGN_LOCK(1U) + +#define S_PCS100G_BER_TIMER_DONE 20 +#define V_PCS100G_BER_TIMER_DONE(x) ((x) << S_PCS100G_BER_TIMER_DONE) +#define F_PCS100G_BER_TIMER_DONE V_PCS100G_BER_TIMER_DONE(1U) + +#define S_PCS100G_BLOCK_LOCK 0 +#define M_PCS100G_BLOCK_LOCK 0xfffffU +#define V_PCS100G_BLOCK_LOCK(x) ((x) << S_PCS100G_BLOCK_LOCK) +#define G_PCS100G_BLOCK_LOCK(x) (((x) >> S_PCS100G_BLOCK_LOCK) & M_PCS100G_BLOCK_LOCK) + +#define A_MAC_PORT_PERR_INT_EN_100G 0x884 + +#define S_PERR_RX_FEC100G_DLY 29 +#define V_PERR_RX_FEC100G_DLY(x) ((x) << S_PERR_RX_FEC100G_DLY) +#define F_PERR_RX_FEC100G_DLY V_PERR_RX_FEC100G_DLY(1U) + +#define S_PERR_RX_FEC100G 28 +#define V_PERR_RX_FEC100G(x) ((x) << S_PERR_RX_FEC100G) +#define F_PERR_RX_FEC100G V_PERR_RX_FEC100G(1U) + +#define S_PERR_RX3_FEC100G_DK 27 +#define V_PERR_RX3_FEC100G_DK(x) ((x) << S_PERR_RX3_FEC100G_DK) +#define F_PERR_RX3_FEC100G_DK V_PERR_RX3_FEC100G_DK(1U) + +#define S_PERR_RX2_FEC100G_DK 26 +#define V_PERR_RX2_FEC100G_DK(x) ((x) << S_PERR_RX2_FEC100G_DK) +#define F_PERR_RX2_FEC100G_DK V_PERR_RX2_FEC100G_DK(1U) + +#define S_PERR_RX1_FEC100G_DK 25 +#define V_PERR_RX1_FEC100G_DK(x) ((x) << S_PERR_RX1_FEC100G_DK) +#define F_PERR_RX1_FEC100G_DK V_PERR_RX1_FEC100G_DK(1U) + +#define S_PERR_RX0_FEC100G_DK 24 +#define V_PERR_RX0_FEC100G_DK(x) ((x) << S_PERR_RX0_FEC100G_DK) +#define F_PERR_RX0_FEC100G_DK V_PERR_RX0_FEC100G_DK(1U) + +#define S_PERR_TX3_PCS100G 23 +#define V_PERR_TX3_PCS100G(x) ((x) << S_PERR_TX3_PCS100G) +#define F_PERR_TX3_PCS100G V_PERR_TX3_PCS100G(1U) + 
+#define S_PERR_TX2_PCS100G 22 +#define V_PERR_TX2_PCS100G(x) ((x) << S_PERR_TX2_PCS100G) +#define F_PERR_TX2_PCS100G V_PERR_TX2_PCS100G(1U) + +#define S_PERR_TX1_PCS100G 21 +#define V_PERR_TX1_PCS100G(x) ((x) << S_PERR_TX1_PCS100G) +#define F_PERR_TX1_PCS100G V_PERR_TX1_PCS100G(1U) + +#define S_PERR_TX0_PCS100G 20 +#define V_PERR_TX0_PCS100G(x) ((x) << S_PERR_TX0_PCS100G) +#define F_PERR_TX0_PCS100G V_PERR_TX0_PCS100G(1U) + +#define S_PERR_RX19_PCS100G 19 +#define V_PERR_RX19_PCS100G(x) ((x) << S_PERR_RX19_PCS100G) +#define F_PERR_RX19_PCS100G V_PERR_RX19_PCS100G(1U) + +#define S_PERR_RX18_PCS100G 18 +#define V_PERR_RX18_PCS100G(x) ((x) << S_PERR_RX18_PCS100G) +#define F_PERR_RX18_PCS100G V_PERR_RX18_PCS100G(1U) + +#define S_PERR_RX17_PCS100G 17 +#define V_PERR_RX17_PCS100G(x) ((x) << S_PERR_RX17_PCS100G) +#define F_PERR_RX17_PCS100G V_PERR_RX17_PCS100G(1U) + +#define S_PERR_RX16_PCS100G 16 +#define V_PERR_RX16_PCS100G(x) ((x) << S_PERR_RX16_PCS100G) +#define F_PERR_RX16_PCS100G V_PERR_RX16_PCS100G(1U) + +#define S_PERR_RX15_PCS100G 15 +#define V_PERR_RX15_PCS100G(x) ((x) << S_PERR_RX15_PCS100G) +#define F_PERR_RX15_PCS100G V_PERR_RX15_PCS100G(1U) + +#define S_PERR_RX14_PCS100G 14 +#define V_PERR_RX14_PCS100G(x) ((x) << S_PERR_RX14_PCS100G) +#define F_PERR_RX14_PCS100G V_PERR_RX14_PCS100G(1U) + +#define S_PERR_RX13_PCS100G 13 +#define V_PERR_RX13_PCS100G(x) ((x) << S_PERR_RX13_PCS100G) +#define F_PERR_RX13_PCS100G V_PERR_RX13_PCS100G(1U) + +#define S_PERR_RX12_PCS100G 12 +#define V_PERR_RX12_PCS100G(x) ((x) << S_PERR_RX12_PCS100G) +#define F_PERR_RX12_PCS100G V_PERR_RX12_PCS100G(1U) + +#define S_PERR_RX11_PCS100G 11 +#define V_PERR_RX11_PCS100G(x) ((x) << S_PERR_RX11_PCS100G) +#define F_PERR_RX11_PCS100G V_PERR_RX11_PCS100G(1U) + +#define S_PERR_RX10_PCS100G 10 +#define V_PERR_RX10_PCS100G(x) ((x) << S_PERR_RX10_PCS100G) +#define F_PERR_RX10_PCS100G V_PERR_RX10_PCS100G(1U) + +#define S_PERR_RX9_PCS100G 9 +#define V_PERR_RX9_PCS100G(x) ((x) << S_PERR_RX9_PCS100G) 
+#define F_PERR_RX9_PCS100G V_PERR_RX9_PCS100G(1U) + +#define S_PERR_RX8_PCS100G 8 +#define V_PERR_RX8_PCS100G(x) ((x) << S_PERR_RX8_PCS100G) +#define F_PERR_RX8_PCS100G V_PERR_RX8_PCS100G(1U) + +#define S_PERR_RX7_PCS100G 7 +#define V_PERR_RX7_PCS100G(x) ((x) << S_PERR_RX7_PCS100G) +#define F_PERR_RX7_PCS100G V_PERR_RX7_PCS100G(1U) + +#define S_PERR_RX6_PCS100G 6 +#define V_PERR_RX6_PCS100G(x) ((x) << S_PERR_RX6_PCS100G) +#define F_PERR_RX6_PCS100G V_PERR_RX6_PCS100G(1U) + +#define S_PERR_RX5_PCS100G 5 +#define V_PERR_RX5_PCS100G(x) ((x) << S_PERR_RX5_PCS100G) +#define F_PERR_RX5_PCS100G V_PERR_RX5_PCS100G(1U) + +#define S_PERR_RX4_PCS100G 4 +#define V_PERR_RX4_PCS100G(x) ((x) << S_PERR_RX4_PCS100G) +#define F_PERR_RX4_PCS100G V_PERR_RX4_PCS100G(1U) + +#define S_PERR_RX3_PCS100G 3 +#define V_PERR_RX3_PCS100G(x) ((x) << S_PERR_RX3_PCS100G) +#define F_PERR_RX3_PCS100G V_PERR_RX3_PCS100G(1U) + +#define S_PERR_RX2_PCS100G 2 +#define V_PERR_RX2_PCS100G(x) ((x) << S_PERR_RX2_PCS100G) +#define F_PERR_RX2_PCS100G V_PERR_RX2_PCS100G(1U) + +#define S_PERR_RX1_PCS100G 1 +#define V_PERR_RX1_PCS100G(x) ((x) << S_PERR_RX1_PCS100G) +#define F_PERR_RX1_PCS100G V_PERR_RX1_PCS100G(1U) + +#define S_PERR_RX0_PCS100G 0 +#define V_PERR_RX0_PCS100G(x) ((x) << S_PERR_RX0_PCS100G) +#define F_PERR_RX0_PCS100G V_PERR_RX0_PCS100G(1U) + +#define A_MAC_PORT_PERR_INT_CAUSE_100G 0x888 +#define A_MAC_PORT_PERR_ENABLE_100G 0x88c +#define A_MAC_PORT_MAC_STAT_DEBUG 0x890 +#define A_MAC_PORT_MAC_25G_50G_AM0 0x894 +#define A_MAC_PORT_MAC_25G_50G_AM1 0x898 +#define A_MAC_PORT_MAC_25G_50G_AM2 0x89c +#define A_MAC_PORT_MAC_25G_50G_AM3 0x8a0 +#define A_MAC_PORT_MAC_AN_STATE_STATUS 0x8a4 #define A_MAC_PORT_EPIO_DATA0 0x8c0 #define A_MAC_PORT_EPIO_DATA1 0x8c4 #define A_MAC_PORT_EPIO_DATA2 0x8c8 @@ -33853,6 +46365,18 @@ #define V_AN_PAGE_RCVD(x) ((x) << S_AN_PAGE_RCVD) #define F_AN_PAGE_RCVD V_AN_PAGE_RCVD(1U) +#define S_PPS 30 +#define V_PPS(x) ((x) << S_PPS) +#define F_PPS V_PPS(1U) + +#define 
S_SINGLE_ALARM 28 +#define V_SINGLE_ALARM(x) ((x) << S_SINGLE_ALARM) +#define F_SINGLE_ALARM V_SINGLE_ALARM(1U) + +#define S_PERIODIC_ALARM 27 +#define V_PERIODIC_ALARM(x) ((x) << S_PERIODIC_ALARM) +#define F_PERIODIC_ALARM V_PERIODIC_ALARM(1U) + #define A_MAC_PORT_INT_CAUSE 0x8dc #define A_MAC_PORT_PERR_INT_EN 0x8e0 @@ -33956,9 +46480,169 @@ #define V_PERR0_TX(x) ((x) << S_PERR0_TX) #define F_PERR0_TX V_PERR0_TX(1U) +#define S_T6_PERR_PKT_RAM 31 +#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM) +#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U) + +#define S_T6_PERR_MASK_RAM 30 +#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM) +#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U) + +#define S_T6_PERR_CRC_RAM 29 +#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM) +#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U) + +#define S_RX_MAC40G 28 +#define V_RX_MAC40G(x) ((x) << S_RX_MAC40G) +#define F_RX_MAC40G V_RX_MAC40G(1U) + +#define S_TX_MAC40G 27 +#define V_TX_MAC40G(x) ((x) << S_TX_MAC40G) +#define F_TX_MAC40G V_TX_MAC40G(1U) + +#define S_RX_ST_MAC40G 26 +#define V_RX_ST_MAC40G(x) ((x) << S_RX_ST_MAC40G) +#define F_RX_ST_MAC40G V_RX_ST_MAC40G(1U) + +#define S_TX_ST_MAC40G 25 +#define V_TX_ST_MAC40G(x) ((x) << S_TX_ST_MAC40G) +#define F_TX_ST_MAC40G V_TX_ST_MAC40G(1U) + +#define S_TX_MAC1G10G 24 +#define V_TX_MAC1G10G(x) ((x) << S_TX_MAC1G10G) +#define F_TX_MAC1G10G V_TX_MAC1G10G(1U) + +#define S_RX_MAC1G10G 23 +#define V_RX_MAC1G10G(x) ((x) << S_RX_MAC1G10G) +#define F_RX_MAC1G10G V_RX_MAC1G10G(1U) + +#define S_RX_STATUS_MAC1G10G 22 +#define V_RX_STATUS_MAC1G10G(x) ((x) << S_RX_STATUS_MAC1G10G) +#define F_RX_STATUS_MAC1G10G V_RX_STATUS_MAC1G10G(1U) + +#define S_RX_ST_MAC1G10G 21 +#define V_RX_ST_MAC1G10G(x) ((x) << S_RX_ST_MAC1G10G) +#define F_RX_ST_MAC1G10G V_RX_ST_MAC1G10G(1U) + +#define S_TX_ST_MAC1G10G 20 +#define V_TX_ST_MAC1G10G(x) ((x) << S_TX_ST_MAC1G10G) +#define F_TX_ST_MAC1G10G V_TX_ST_MAC1G10G(1U) + +#define S_PERR_TX0_PCS40G 19 +#define 
V_PERR_TX0_PCS40G(x) ((x) << S_PERR_TX0_PCS40G) +#define F_PERR_TX0_PCS40G V_PERR_TX0_PCS40G(1U) + +#define S_PERR_TX1_PCS40G 18 +#define V_PERR_TX1_PCS40G(x) ((x) << S_PERR_TX1_PCS40G) +#define F_PERR_TX1_PCS40G V_PERR_TX1_PCS40G(1U) + +#define S_PERR_TX2_PCS40G 17 +#define V_PERR_TX2_PCS40G(x) ((x) << S_PERR_TX2_PCS40G) +#define F_PERR_TX2_PCS40G V_PERR_TX2_PCS40G(1U) + +#define S_PERR_TX3_PCS40G 16 +#define V_PERR_TX3_PCS40G(x) ((x) << S_PERR_TX3_PCS40G) +#define F_PERR_TX3_PCS40G V_PERR_TX3_PCS40G(1U) + +#define S_PERR_TX0_FEC40G 15 +#define V_PERR_TX0_FEC40G(x) ((x) << S_PERR_TX0_FEC40G) +#define F_PERR_TX0_FEC40G V_PERR_TX0_FEC40G(1U) + +#define S_PERR_TX1_FEC40G 14 +#define V_PERR_TX1_FEC40G(x) ((x) << S_PERR_TX1_FEC40G) +#define F_PERR_TX1_FEC40G V_PERR_TX1_FEC40G(1U) + +#define S_PERR_TX2_FEC40G 13 +#define V_PERR_TX2_FEC40G(x) ((x) << S_PERR_TX2_FEC40G) +#define F_PERR_TX2_FEC40G V_PERR_TX2_FEC40G(1U) + +#define S_PERR_TX3_FEC40G 12 +#define V_PERR_TX3_FEC40G(x) ((x) << S_PERR_TX3_FEC40G) +#define F_PERR_TX3_FEC40G V_PERR_TX3_FEC40G(1U) + +#define S_PERR_RX0_PCS40G 11 +#define V_PERR_RX0_PCS40G(x) ((x) << S_PERR_RX0_PCS40G) +#define F_PERR_RX0_PCS40G V_PERR_RX0_PCS40G(1U) + +#define S_PERR_RX1_PCS40G 10 +#define V_PERR_RX1_PCS40G(x) ((x) << S_PERR_RX1_PCS40G) +#define F_PERR_RX1_PCS40G V_PERR_RX1_PCS40G(1U) + +#define S_PERR_RX2_PCS40G 9 +#define V_PERR_RX2_PCS40G(x) ((x) << S_PERR_RX2_PCS40G) +#define F_PERR_RX2_PCS40G V_PERR_RX2_PCS40G(1U) + +#define S_PERR_RX3_PCS40G 8 +#define V_PERR_RX3_PCS40G(x) ((x) << S_PERR_RX3_PCS40G) +#define F_PERR_RX3_PCS40G V_PERR_RX3_PCS40G(1U) + +#define S_PERR_RX0_FEC40G 7 +#define V_PERR_RX0_FEC40G(x) ((x) << S_PERR_RX0_FEC40G) +#define F_PERR_RX0_FEC40G V_PERR_RX0_FEC40G(1U) + +#define S_PERR_RX1_FEC40G 6 +#define V_PERR_RX1_FEC40G(x) ((x) << S_PERR_RX1_FEC40G) +#define F_PERR_RX1_FEC40G V_PERR_RX1_FEC40G(1U) + +#define S_PERR_RX2_FEC40G 5 +#define V_PERR_RX2_FEC40G(x) ((x) << S_PERR_RX2_FEC40G) +#define 
F_PERR_RX2_FEC40G V_PERR_RX2_FEC40G(1U) + +#define S_PERR_RX3_FEC40G 4 +#define V_PERR_RX3_FEC40G(x) ((x) << S_PERR_RX3_FEC40G) +#define F_PERR_RX3_FEC40G V_PERR_RX3_FEC40G(1U) + +#define S_PERR_RX_PCS10G_LPBK 3 +#define V_PERR_RX_PCS10G_LPBK(x) ((x) << S_PERR_RX_PCS10G_LPBK) +#define F_PERR_RX_PCS10G_LPBK V_PERR_RX_PCS10G_LPBK(1U) + +#define S_PERR_RX_PCS10G 2 +#define V_PERR_RX_PCS10G(x) ((x) << S_PERR_RX_PCS10G) +#define F_PERR_RX_PCS10G V_PERR_RX_PCS10G(1U) + +#define S_PERR_RX_PCS1G 1 +#define V_PERR_RX_PCS1G(x) ((x) << S_PERR_RX_PCS1G) +#define F_PERR_RX_PCS1G V_PERR_RX_PCS1G(1U) + +#define S_PERR_TX_PCS1G 0 +#define V_PERR_TX_PCS1G(x) ((x) << S_PERR_TX_PCS1G) +#define F_PERR_TX_PCS1G V_PERR_TX_PCS1G(1U) + #define A_MAC_PORT_PERR_INT_CAUSE 0x8e4 + +#define S_T6_PERR_PKT_RAM 31 +#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM) +#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U) + +#define S_T6_PERR_MASK_RAM 30 +#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM) +#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U) + +#define S_T6_PERR_CRC_RAM 29 +#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM) +#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U) + #define A_MAC_PORT_PERR_ENABLE 0x8e8 + +#define S_T6_PERR_PKT_RAM 31 +#define V_T6_PERR_PKT_RAM(x) ((x) << S_T6_PERR_PKT_RAM) +#define F_T6_PERR_PKT_RAM V_T6_PERR_PKT_RAM(1U) + +#define S_T6_PERR_MASK_RAM 30 +#define V_T6_PERR_MASK_RAM(x) ((x) << S_T6_PERR_MASK_RAM) +#define F_T6_PERR_MASK_RAM V_T6_PERR_MASK_RAM(1U) + +#define S_T6_PERR_CRC_RAM 29 +#define V_T6_PERR_CRC_RAM(x) ((x) << S_T6_PERR_CRC_RAM) +#define F_T6_PERR_CRC_RAM V_T6_PERR_CRC_RAM(1U) + #define A_MAC_PORT_PERR_INJECT 0x8ec + +#define S_MEMSEL_PERR 1 +#define M_MEMSEL_PERR 0x3fU +#define V_MEMSEL_PERR(x) ((x) << S_MEMSEL_PERR) +#define G_MEMSEL_PERR(x) (((x) >> S_MEMSEL_PERR) & M_MEMSEL_PERR) + #define A_MAC_PORT_HSS_CFG0 0x8f0 #define S_HSSREFCLKVALIDA 20 @@ -34189,6 +46873,16 @@ #define V_HSSPLLCONFIGA(x) ((x) << S_HSSPLLCONFIGA) #define 
G_HSSPLLCONFIGA(x) (((x) >> S_HSSPLLCONFIGA) & M_HSSPLLCONFIGA) +#define S_T6_HSSCALSSTN 22 +#define M_T6_HSSCALSSTN 0x3fU +#define V_T6_HSSCALSSTN(x) ((x) << S_T6_HSSCALSSTN) +#define G_T6_HSSCALSSTN(x) (((x) >> S_T6_HSSCALSSTN) & M_T6_HSSCALSSTN) + +#define S_T6_HSSCALSSTP 16 +#define M_T6_HSSCALSSTP 0x3fU +#define V_T6_HSSCALSSTP(x) ((x) << S_T6_HSSCALSSTP) +#define G_T6_HSSCALSSTP(x) (((x) >> S_T6_HSSCALSSTP) & M_T6_HSSCALSSTP) + #define A_MAC_PORT_HSS_CFG4 0x900 #define S_HSSDIVSELA 9 @@ -34201,6 +46895,24 @@ #define V_HSSDIVSELB(x) ((x) << S_HSSDIVSELB) #define G_HSSDIVSELB(x) (((x) >> S_HSSDIVSELB) & M_HSSDIVSELB) +#define S_HSSREFDIVA 24 +#define M_HSSREFDIVA 0xfU +#define V_HSSREFDIVA(x) ((x) << S_HSSREFDIVA) +#define G_HSSREFDIVA(x) (((x) >> S_HSSREFDIVA) & M_HSSREFDIVA) + +#define S_HSSREFDIVB 20 +#define M_HSSREFDIVB 0xfU +#define V_HSSREFDIVB(x) ((x) << S_HSSREFDIVB) +#define G_HSSREFDIVB(x) (((x) >> S_HSSREFDIVB) & M_HSSREFDIVB) + +#define S_HSSPLLDIV2B 19 +#define V_HSSPLLDIV2B(x) ((x) << S_HSSPLLDIV2B) +#define F_HSSPLLDIV2B V_HSSPLLDIV2B(1U) + +#define S_HSSPLLDIV2A 18 +#define V_HSSPLLDIV2A(x) ((x) << S_HSSPLLDIV2A) +#define F_HSSPLLDIV2A V_HSSPLLDIV2A(1U) + #define A_MAC_PORT_HSS_STATUS 0x904 #define S_HSSPLLLOCKB 3 @@ -34219,6 +46931,22 @@ #define V_HSSPRTREADYA(x) ((x) << S_HSSPRTREADYA) #define F_HSSPRTREADYA V_HSSPRTREADYA(1U) +#define S_RXDERROFLOW 19 +#define V_RXDERROFLOW(x) ((x) << S_RXDERROFLOW) +#define F_RXDERROFLOW V_RXDERROFLOW(1U) + +#define S_RXCERROFLOW 18 +#define V_RXCERROFLOW(x) ((x) << S_RXCERROFLOW) +#define F_RXCERROFLOW V_RXCERROFLOW(1U) + +#define S_RXBERROFLOW 17 +#define V_RXBERROFLOW(x) ((x) << S_RXBERROFLOW) +#define F_RXBERROFLOW V_RXBERROFLOW(1U) + +#define S_RXAERROFLOW 16 +#define V_RXAERROFLOW(x) ((x) << S_RXAERROFLOW) +#define F_RXAERROFLOW V_RXAERROFLOW(1U) + #define A_MAC_PORT_HSS_EEE_STATUS 0x908 #define S_RXAQUIET_STATUS 15 @@ -34607,6 +47335,27 @@ #define V_Q(x) ((x) << S_Q) #define G_Q(x) (((x) >> S_Q) & 
M_Q) +#define S_ALARM_EN 21 +#define V_ALARM_EN(x) ((x) << S_ALARM_EN) +#define F_ALARM_EN V_ALARM_EN(1U) + +#define S_ALARM_START 20 +#define V_ALARM_START(x) ((x) << S_ALARM_START) +#define F_ALARM_START V_ALARM_START(1U) + +#define S_PPS_EN 19 +#define V_PPS_EN(x) ((x) << S_PPS_EN) +#define F_PPS_EN V_PPS_EN(1U) + +#define A_MAC_PORT_PTP_PPS 0x9b0 +#define A_MAC_PORT_PTP_SINGLE_ALARM 0x9b4 +#define A_MAC_PORT_PTP_PERIODIC_ALARM 0x9b8 +#define A_MAC_PORT_PTP_STATUS 0x9bc + +#define S_ALARM_DONE 0 +#define V_ALARM_DONE(x) ((x) << S_ALARM_DONE) +#define F_ALARM_DONE V_ALARM_DONE(1U) + #define A_MAC_PORT_MTIP_REVISION 0xa00 #define S_CUSTREV 16 @@ -34966,6 +47715,18 @@ #define V_SPEEDSEL0(x) ((x) << S_SPEEDSEL0) #define F_SPEEDSEL0 V_SPEEDSEL0(1U) +#define A_MAC_PORT_MTIP_1G10G_REVISION 0xd00 + +#define S_VER_1G10G 8 +#define M_VER_1G10G 0xffU +#define V_VER_1G10G(x) ((x) << S_VER_1G10G) +#define G_VER_1G10G(x) (((x) >> S_VER_1G10G) & M_VER_1G10G) + +#define S_REV_1G10G 0 +#define M_REV_1G10G 0xffU +#define V_REV_1G10G(x) ((x) << S_REV_1G10G) +#define G_REV_1G10G(x) (((x) >> S_REV_1G10G) & M_REV_1G10G) + #define A_MAC_PORT_MTIP_SGMII_STATUS 0xd04 #define S_100BASET4 15 @@ -35012,8 +47773,52 @@ #define V_EXTDCAPABILITY(x) ((x) << S_EXTDCAPABILITY) #define F_EXTDCAPABILITY V_EXTDCAPABILITY(1U) +#define A_MAC_PORT_MTIP_1G10G_SCRATCH 0xd04 #define A_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_0 0xd08 +#define A_MAC_PORT_MTIP_1G10G_COMMAND_CONFIG 0xd08 + +#define S_SHORT_DISCARD 25 +#define V_SHORT_DISCARD(x) ((x) << S_SHORT_DISCARD) +#define F_SHORT_DISCARD V_SHORT_DISCARD(1U) + +#define S_REG_LOWP_RXEMPTY 24 +#define V_REG_LOWP_RXEMPTY(x) ((x) << S_REG_LOWP_RXEMPTY) +#define F_REG_LOWP_RXEMPTY V_REG_LOWP_RXEMPTY(1U) + +#define S_TX_LOWP_ENA 23 +#define V_TX_LOWP_ENA(x) ((x) << S_TX_LOWP_ENA) +#define F_TX_LOWP_ENA V_TX_LOWP_ENA(1U) + +#define S_TX_FLUSH_EN 22 +#define V_TX_FLUSH_EN(x) ((x) << S_TX_FLUSH_EN) +#define F_TX_FLUSH_EN V_TX_FLUSH_EN(1U) + +#define S_SFD_ANY 21 
+#define V_SFD_ANY(x) ((x) << S_SFD_ANY) +#define F_SFD_ANY V_SFD_ANY(1U) + +#define S_COL_CNT_EXT 18 +#define V_COL_CNT_EXT(x) ((x) << S_COL_CNT_EXT) +#define F_COL_CNT_EXT V_COL_CNT_EXT(1U) + +#define S_FORCE_SEND_IDLE 16 +#define V_FORCE_SEND_IDLE(x) ((x) << S_FORCE_SEND_IDLE) +#define F_FORCE_SEND_IDLE V_FORCE_SEND_IDLE(1U) + +#define S_CNTL_FRM_ENA 13 +#define V_CNTL_FRM_ENA(x) ((x) << S_CNTL_FRM_ENA) +#define F_CNTL_FRM_ENA V_CNTL_FRM_ENA(1U) + +#define S_RX_ENAMAC 1 +#define V_RX_ENAMAC(x) ((x) << S_RX_ENAMAC) +#define F_RX_ENAMAC V_RX_ENAMAC(1U) + +#define S_TX_ENAMAC 0 +#define V_TX_ENAMAC(x) ((x) << S_TX_ENAMAC) +#define F_TX_ENAMAC V_TX_ENAMAC(1U) + #define A_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_1 0xd0c +#define A_MAC_PORT_MTIP_1G10G_MAC_ADDR_0 0xd0c #define A_MAC_PORT_MTIP_SGMII_DEV_ABILITY 0xd10 #define S_RF2 13 @@ -35040,6 +47845,7 @@ #define V_FD(x) ((x) << S_FD) #define F_FD V_FD(1U) +#define A_MAC_PORT_MTIP_1G10G_MAC_ADDR_1 0xd10 #define A_MAC_PORT_MTIP_SGMII_PARTNER_ABILITY 0xd14 #define S_CULINKSTATUS 15 @@ -35055,6 +47861,18 @@ #define V_CUSPEED(x) ((x) << S_CUSPEED) #define G_CUSPEED(x) (((x) >> S_CUSPEED) & M_CUSPEED) +#define A_MAC_PORT_MTIP_1G10G_FRM_LENGTH_TX_MTU 0xd14 + +#define S_SET_LEN 16 +#define M_SET_LEN 0xffffU +#define V_SET_LEN(x) ((x) << S_SET_LEN) +#define G_SET_LEN(x) (((x) >> S_SET_LEN) & M_SET_LEN) + +#define S_FRM_LEN_SET 0 +#define M_FRM_LEN_SET 0xffffU +#define V_FRM_LEN_SET(x) ((x) << S_FRM_LEN_SET) +#define G_FRM_LEN_SET(x) (((x) >> S_FRM_LEN_SET) & M_FRM_LEN_SET) + #define A_MAC_PORT_MTIP_SGMII_AN_EXPANSION 0xd18 #define S_PGRCVD 1 @@ -35066,8 +47884,117 @@ #define F_REALTIMEPGRCVD V_REALTIMEPGRCVD(1U) #define A_MAC_PORT_MTIP_SGMII_DEVICE_NP 0xd1c +#define A_MAC_PORT_MTIP_1G10G_RX_FIFO_SECTIONS 0xd1c + +#define S_RX1G10G_EMPTY 16 +#define M_RX1G10G_EMPTY 0xffffU +#define V_RX1G10G_EMPTY(x) ((x) << S_RX1G10G_EMPTY) +#define G_RX1G10G_EMPTY(x) (((x) >> S_RX1G10G_EMPTY) & M_RX1G10G_EMPTY) + +#define S_RX1G10G_AVAIL 0 
+#define M_RX1G10G_AVAIL 0xffffU +#define V_RX1G10G_AVAIL(x) ((x) << S_RX1G10G_AVAIL) +#define G_RX1G10G_AVAIL(x) (((x) >> S_RX1G10G_AVAIL) & M_RX1G10G_AVAIL) + #define A_MAC_PORT_MTIP_SGMII_PARTNER_NP 0xd20 +#define A_MAC_PORT_MTIP_1G10G_TX_FIFO_SECTIONS 0xd20 + +#define S_TX1G10G_EMPTY 16 +#define M_TX1G10G_EMPTY 0xffffU +#define V_TX1G10G_EMPTY(x) ((x) << S_TX1G10G_EMPTY) +#define G_TX1G10G_EMPTY(x) (((x) >> S_TX1G10G_EMPTY) & M_TX1G10G_EMPTY) + +#define S_TX1G10G_AVAIL 0 +#define M_TX1G10G_AVAIL 0xffffU +#define V_TX1G10G_AVAIL(x) ((x) << S_TX1G10G_AVAIL) +#define G_TX1G10G_AVAIL(x) (((x) >> S_TX1G10G_AVAIL) & M_TX1G10G_AVAIL) + +#define A_MAC_PORT_MTIP_1G10G_RX_FIFO_ALMOST_F_E 0xd24 + +#define S_ALMOSTFULL 16 +#define M_ALMOSTFULL 0xffffU +#define V_ALMOSTFULL(x) ((x) << S_ALMOSTFULL) +#define G_ALMOSTFULL(x) (((x) >> S_ALMOSTFULL) & M_ALMOSTFULL) + +#define S_ALMOSTEMPTY 0 +#define M_ALMOSTEMPTY 0xffffU +#define V_ALMOSTEMPTY(x) ((x) << S_ALMOSTEMPTY) +#define G_ALMOSTEMPTY(x) (((x) >> S_ALMOSTEMPTY) & M_ALMOSTEMPTY) + +#define A_MAC_PORT_MTIP_1G10G_TX_FIFO_ALMOST_F_E 0xd28 +#define A_MAC_PORT_MTIP_1G10G_HASHTABLE_LOAD 0xd2c +#define A_MAC_PORT_MTIP_1G10G_MDIO_CFG_STATUS 0xd30 + +#define S_CLK_DIVISOR 7 +#define M_CLK_DIVISOR 0x1ffU +#define V_CLK_DIVISOR(x) ((x) << S_CLK_DIVISOR) +#define G_CLK_DIVISOR(x) (((x) >> S_CLK_DIVISOR) & M_CLK_DIVISOR) + +#define S_ENA_CLAUSE 6 +#define V_ENA_CLAUSE(x) ((x) << S_ENA_CLAUSE) +#define F_ENA_CLAUSE V_ENA_CLAUSE(1U) + +#define S_PREAMBLE_DISABLE 5 +#define V_PREAMBLE_DISABLE(x) ((x) << S_PREAMBLE_DISABLE) +#define F_PREAMBLE_DISABLE V_PREAMBLE_DISABLE(1U) + +#define S_HOLD_TIME_SETTING 2 +#define M_HOLD_TIME_SETTING 0x7U +#define V_HOLD_TIME_SETTING(x) ((x) << S_HOLD_TIME_SETTING) +#define G_HOLD_TIME_SETTING(x) (((x) >> S_HOLD_TIME_SETTING) & M_HOLD_TIME_SETTING) + +#define S_MDIO_READ_ERROR 1 +#define V_MDIO_READ_ERROR(x) ((x) << S_MDIO_READ_ERROR) +#define F_MDIO_READ_ERROR V_MDIO_READ_ERROR(1U) + +#define 
A_MAC_PORT_MTIP_1G10G_MDIO_COMMAND 0xd34 + +#define S_READ_MODE 15 +#define V_READ_MODE(x) ((x) << S_READ_MODE) +#define F_READ_MODE V_READ_MODE(1U) + +#define S_POST_INCR_READ 14 +#define V_POST_INCR_READ(x) ((x) << S_POST_INCR_READ) +#define F_POST_INCR_READ V_POST_INCR_READ(1U) + +#define S_PORT_PHY_ADDR 5 +#define M_PORT_PHY_ADDR 0x1fU +#define V_PORT_PHY_ADDR(x) ((x) << S_PORT_PHY_ADDR) +#define G_PORT_PHY_ADDR(x) (((x) >> S_PORT_PHY_ADDR) & M_PORT_PHY_ADDR) + +#define S_DEVICE_REG_ADDR 0 +#define M_DEVICE_REG_ADDR 0x1fU +#define V_DEVICE_REG_ADDR(x) ((x) << S_DEVICE_REG_ADDR) +#define G_DEVICE_REG_ADDR(x) (((x) >> S_DEVICE_REG_ADDR) & M_DEVICE_REG_ADDR) + +#define A_MAC_PORT_MTIP_1G10G_MDIO_DATA 0xd38 + +#define S_MDIO_DATA 0 +#define M_MDIO_DATA 0xffffU +#define V_MDIO_DATA(x) ((x) << S_MDIO_DATA) +#define G_MDIO_DATA(x) (((x) >> S_MDIO_DATA) & M_MDIO_DATA) + #define A_MAC_PORT_MTIP_SGMII_EXTENDED_STATUS 0xd3c +#define A_MAC_PORT_MTIP_1G10G_MDIO_REGADDR 0xd3c +#define A_MAC_PORT_MTIP_1G10G_STATUS 0xd40 + +#define S_RX_LINT_FAULT 7 +#define V_RX_LINT_FAULT(x) ((x) << S_RX_LINT_FAULT) +#define F_RX_LINT_FAULT V_RX_LINT_FAULT(1U) + +#define S_RX_EMPTY 6 +#define V_RX_EMPTY(x) ((x) << S_RX_EMPTY) +#define F_RX_EMPTY V_RX_EMPTY(1U) + +#define S_TX_EMPTY 5 +#define V_TX_EMPTY(x) ((x) << S_TX_EMPTY) +#define F_TX_EMPTY V_TX_EMPTY(1U) + +#define S_RX_LOWP 4 +#define V_RX_LOWP(x) ((x) << S_RX_LOWP) +#define F_RX_LOWP V_RX_LOWP(1U) + +#define A_MAC_PORT_MTIP_1G10G_TX_IPG_LENGTH 0xd44 #define A_MAC_PORT_MTIP_SGMII_LINK_TIMER_LO 0xd48 #define S_COUNT_LO 0 @@ -35075,6 +48002,7 @@ #define V_COUNT_LO(x) ((x) << S_COUNT_LO) #define G_COUNT_LO(x) (((x) >> S_COUNT_LO) & M_COUNT_LO) +#define A_MAC_PORT_MTIP_1G10G_CREDIT_TRIGGER 0xd48 #define A_MAC_PORT_MTIP_SGMII_LINK_TIMER_HI 0xd4c #define S_COUNT_HI 0 @@ -35082,6 +48010,7 @@ #define V_COUNT_HI(x) ((x) << S_COUNT_HI) #define G_COUNT_HI(x) (((x) >> S_COUNT_HI) & M_COUNT_HI) +#define A_MAC_PORT_MTIP_1G10G_INIT_CREDIT 0xd4c 
#define A_MAC_PORT_MTIP_SGMII_IF_MODE 0xd50 #define S_SGMII_PCS_ENABLE 5 @@ -35105,6 +48034,272 @@ #define V_SGMII_ENA(x) ((x) << S_SGMII_ENA) #define F_SGMII_ENA V_SGMII_ENA(1U) +#define A_MAC_PORT_MTIP_1G10G_CL01_PAUSE_QUANTA 0xd54 + +#define S_CL1_PAUSE_QUANTA 16 +#define M_CL1_PAUSE_QUANTA 0xffffU +#define V_CL1_PAUSE_QUANTA(x) ((x) << S_CL1_PAUSE_QUANTA) +#define G_CL1_PAUSE_QUANTA(x) (((x) >> S_CL1_PAUSE_QUANTA) & M_CL1_PAUSE_QUANTA) + +#define S_CL0_PAUSE_QUANTA 0 +#define M_CL0_PAUSE_QUANTA 0xffffU +#define V_CL0_PAUSE_QUANTA(x) ((x) << S_CL0_PAUSE_QUANTA) +#define G_CL0_PAUSE_QUANTA(x) (((x) >> S_CL0_PAUSE_QUANTA) & M_CL0_PAUSE_QUANTA) + +#define A_MAC_PORT_MTIP_1G10G_CL23_PAUSE_QUANTA 0xd58 + +#define S_CL3_PAUSE_QUANTA 16 +#define M_CL3_PAUSE_QUANTA 0xffffU +#define V_CL3_PAUSE_QUANTA(x) ((x) << S_CL3_PAUSE_QUANTA) +#define G_CL3_PAUSE_QUANTA(x) (((x) >> S_CL3_PAUSE_QUANTA) & M_CL3_PAUSE_QUANTA) + +#define S_CL2_PAUSE_QUANTA 0 +#define M_CL2_PAUSE_QUANTA 0xffffU +#define V_CL2_PAUSE_QUANTA(x) ((x) << S_CL2_PAUSE_QUANTA) +#define G_CL2_PAUSE_QUANTA(x) (((x) >> S_CL2_PAUSE_QUANTA) & M_CL2_PAUSE_QUANTA) + +#define A_MAC_PORT_MTIP_1G10G_CL45_PAUSE_QUANTA 0xd5c + +#define S_CL5_PAUSE_QUANTA 16 +#define M_CL5_PAUSE_QUANTA 0xffffU +#define V_CL5_PAUSE_QUANTA(x) ((x) << S_CL5_PAUSE_QUANTA) +#define G_CL5_PAUSE_QUANTA(x) (((x) >> S_CL5_PAUSE_QUANTA) & M_CL5_PAUSE_QUANTA) + +#define S_CL4_PAUSE_QUANTA 0 +#define M_CL4_PAUSE_QUANTA 0xffffU +#define V_CL4_PAUSE_QUANTA(x) ((x) << S_CL4_PAUSE_QUANTA) +#define G_CL4_PAUSE_QUANTA(x) (((x) >> S_CL4_PAUSE_QUANTA) & M_CL4_PAUSE_QUANTA) + +#define A_MAC_PORT_MTIP_1G10G_CL67_PAUSE_QUANTA 0xd60 + +#define S_CL7_PAUSE_QUANTA 16 +#define M_CL7_PAUSE_QUANTA 0xffffU +#define V_CL7_PAUSE_QUANTA(x) ((x) << S_CL7_PAUSE_QUANTA) +#define G_CL7_PAUSE_QUANTA(x) (((x) >> S_CL7_PAUSE_QUANTA) & M_CL7_PAUSE_QUANTA) + +#define S_CL6_PAUSE_QUANTA 0 +#define M_CL6_PAUSE_QUANTA 0xffffU +#define V_CL6_PAUSE_QUANTA(x) ((x) << S_CL6_PAUSE_QUANTA) 
+#define G_CL6_PAUSE_QUANTA(x) (((x) >> S_CL6_PAUSE_QUANTA) & M_CL6_PAUSE_QUANTA) + +#define A_MAC_PORT_MTIP_1G10G_CL01_QUANTA_THRESH 0xd64 + +#define S_CL1_QUANTA_THRESH 16 +#define M_CL1_QUANTA_THRESH 0xffffU +#define V_CL1_QUANTA_THRESH(x) ((x) << S_CL1_QUANTA_THRESH) +#define G_CL1_QUANTA_THRESH(x) (((x) >> S_CL1_QUANTA_THRESH) & M_CL1_QUANTA_THRESH) + +#define S_CL0_QUANTA_THRESH 0 +#define M_CL0_QUANTA_THRESH 0xffffU +#define V_CL0_QUANTA_THRESH(x) ((x) << S_CL0_QUANTA_THRESH) +#define G_CL0_QUANTA_THRESH(x) (((x) >> S_CL0_QUANTA_THRESH) & M_CL0_QUANTA_THRESH) + +#define A_MAC_PORT_MTIP_1G10G_CL23_QUANTA_THRESH 0xd68 + +#define S_CL3_QUANTA_THRESH 16 +#define M_CL3_QUANTA_THRESH 0xffffU +#define V_CL3_QUANTA_THRESH(x) ((x) << S_CL3_QUANTA_THRESH) +#define G_CL3_QUANTA_THRESH(x) (((x) >> S_CL3_QUANTA_THRESH) & M_CL3_QUANTA_THRESH) + +#define S_CL2_QUANTA_THRESH 0 +#define M_CL2_QUANTA_THRESH 0xffffU +#define V_CL2_QUANTA_THRESH(x) ((x) << S_CL2_QUANTA_THRESH) +#define G_CL2_QUANTA_THRESH(x) (((x) >> S_CL2_QUANTA_THRESH) & M_CL2_QUANTA_THRESH) + +#define A_MAC_PORT_MTIP_1G10G_CL45_QUANTA_THRESH 0xd6c + +#define S_CL5_QUANTA_THRESH 16 +#define M_CL5_QUANTA_THRESH 0xffffU +#define V_CL5_QUANTA_THRESH(x) ((x) << S_CL5_QUANTA_THRESH) +#define G_CL5_QUANTA_THRESH(x) (((x) >> S_CL5_QUANTA_THRESH) & M_CL5_QUANTA_THRESH) + +#define S_CL4_QUANTA_THRESH 0 +#define M_CL4_QUANTA_THRESH 0xffffU +#define V_CL4_QUANTA_THRESH(x) ((x) << S_CL4_QUANTA_THRESH) +#define G_CL4_QUANTA_THRESH(x) (((x) >> S_CL4_QUANTA_THRESH) & M_CL4_QUANTA_THRESH) + +#define A_MAC_PORT_MTIP_1G10G_CL67_QUANTA_THRESH 0xd70 + +#define S_CL7_QUANTA_THRESH 16 +#define M_CL7_QUANTA_THRESH 0xffffU +#define V_CL7_QUANTA_THRESH(x) ((x) << S_CL7_QUANTA_THRESH) +#define G_CL7_QUANTA_THRESH(x) (((x) >> S_CL7_QUANTA_THRESH) & M_CL7_QUANTA_THRESH) + +#define S_CL6_QUANTA_THRESH 0 +#define M_CL6_QUANTA_THRESH 0xffffU +#define V_CL6_QUANTA_THRESH(x) ((x) << S_CL6_QUANTA_THRESH) +#define G_CL6_QUANTA_THRESH(x) (((x) 
>> S_CL6_QUANTA_THRESH) & M_CL6_QUANTA_THRESH) + +#define A_MAC_PORT_MTIP_1G10G_RX_PAUSE_STATUS 0xd74 + +#define S_STATUS_BIT 0 +#define M_STATUS_BIT 0xffU +#define V_STATUS_BIT(x) ((x) << S_STATUS_BIT) +#define G_STATUS_BIT(x) (((x) >> S_STATUS_BIT) & M_STATUS_BIT) + +#define A_MAC_PORT_MTIP_1G10G_TS_TIMESTAMP 0xd7c +#define A_MAC_PORT_MTIP_1G10G_STATN_CONFIG 0xde0 + +#define S_CLEAR 2 +#define V_CLEAR(x) ((x) << S_CLEAR) +#define F_CLEAR V_CLEAR(1U) + +#define S_CLEAR_ON_READ 1 +#define V_CLEAR_ON_READ(x) ((x) << S_CLEAR_ON_READ) +#define F_CLEAR_ON_READ V_CLEAR_ON_READ(1U) + +#define S_SATURATE 0 +#define V_SATURATE(x) ((x) << S_SATURATE) +#define F_SATURATE V_SATURATE(1U) + +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOCTETS 0xe00 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOCTETSHI 0xe04 +#define A_MAC_PORT_MTIP_1G10G_RX_OCTETSOK 0xe08 +#define A_MAC_PORT_MTIP_1G10G_RX_OCTETSOKHI 0xe0c +#define A_MAC_PORT_MTIP_1G10G_RX_AALIGNMENTERRORS 0xe10 +#define A_MAC_PORT_MTIP_1G10G_RX_AALIGNMENTERRORSHI 0xe14 +#define A_MAC_PORT_MTIP_1G10G_RX_APAUSEMACCTRLFRAMES 0xe18 +#define A_MAC_PORT_MTIP_1G10G_RX_APAUSEMACCTRLFRAMESHI 0xe1c +#define A_MAC_PORT_MTIP_1G10G_RX_FRAMESOK 0xe20 +#define A_MAC_PORT_MTIP_1G10G_RX_FRAMESOKHI 0xe24 +#define A_MAC_PORT_MTIP_1G10G_RX_CRCERRORS 0xe28 +#define A_MAC_PORT_MTIP_1G10G_RX_CRCERRORSHI 0xe2c +#define A_MAC_PORT_MTIP_1G10G_RX_VLANOK 0xe30 +#define A_MAC_PORT_MTIP_1G10G_RX_VLANOKHI 0xe34 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINERRORS 0xe38 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINERRORSHI 0xe3c +#define A_MAC_PORT_MTIP_1G10G_RX_IFINUCASTPKTS 0xe40 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINUCASTPKTSHI 0xe44 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINMULTICASTPKTS 0xe48 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINMULTICASTPKTSHI 0xe4c +#define A_MAC_PORT_MTIP_1G10G_RX_IFINBROADCASTPKTS 0xe50 +#define A_MAC_PORT_MTIP_1G10G_RX_IFINBROADCASTPKTSHI 0xe54 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSDROPEVENTS 0xe58 +#define 
A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSDROPEVENTSHI 0xe5c +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS 0xe60 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTSHI 0xe64 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSUNDERSIZEPKTS 0xe68 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSUNDERSIZEPKTSHI 0xe6c +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS64OCTETS 0xe70 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS64OCTETSHI 0xe74 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS65TO127OCTETS 0xe78 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS65TO127OCTETSHI 0xe7c +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS128TO255OCTETS 0xe80 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS128TO255OCTETSHI 0xe84 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS256TO511OCTETS 0xe88 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS256TO511OCTETSHI 0xe8c +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS512TO1023OCTETS 0xe90 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS512TO1023OCTETSHI 0xe94 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1024TO1518OCTETS 0xe98 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1024TO1518OCTETSHI 0xe9c +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1519TOMAX 0xea0 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSPKTS1519TOMAXHI 0xea4 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOVERSIZEPKTS 0xea8 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSOVERSIZEPKTSHI 0xeac +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSJABBERS 0xeb0 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSJABBERSHI 0xeb4 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSFRAGMENTS 0xeb8 +#define A_MAC_PORT_MTIP_1G10G_RX_ETHERSTATSFRAGMENTSHI 0xebc +#define A_MAC_PORT_MTIP_1G10G_AMACCONTROLFRAMESRECEIVED 0xec0 +#define A_MAC_PORT_MTIP_1G10G_AMACCONTROLFRAMESRECEIVEDHI 0xec4 +#define A_MAC_PORT_MTIP_1G10G_RX_AFRAMETOOLONG 0xec8 +#define A_MAC_PORT_MTIP_1G10G_RX_AFRAMETOOLONGHI 0xecc +#define A_MAC_PORT_MTIP_1G10G_RX_AINRANGELENGTHERRORS 0xed0 +#define A_MAC_PORT_MTIP_1G10G_RX_AINRANGELENGTHERRORSHI 0xed4 +#define 
A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSOCTETS 0xf00 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSOCTETSHI 0xf04 +#define A_MAC_PORT_MTIP_1G10G_TX_OCTETSOK 0xf08 +#define A_MAC_PORT_MTIP_1G10G_TX_OCTETSOKHI 0xf0c +#define A_MAC_PORT_MTIP_1G10G_TX_AALIGNMENTERRORS 0xf10 +#define A_MAC_PORT_MTIP_1G10G_TX_AALIGNMENTERRORSHI 0xf14 +#define A_MAC_PORT_MTIP_1G10G_TX_APAUSEMACCTRLFRAMES 0xf18 +#define A_MAC_PORT_MTIP_1G10G_TX_APAUSEMACCTRLFRAMESHI 0xf1c +#define A_MAC_PORT_MTIP_1G10G_TX_FRAMESOK 0xf20 +#define A_MAC_PORT_MTIP_1G10G_TX_FRAMESOKHI 0xf24 +#define A_MAC_PORT_MTIP_1G10G_TX_CRCERRORS 0xf28 +#define A_MAC_PORT_MTIP_1G10G_TX_CRCERRORSHI 0xf2c +#define A_MAC_PORT_MTIP_1G10G_TX_VLANOK 0xf30 +#define A_MAC_PORT_MTIP_1G10G_TX_VLANOKHI 0xf34 +#define A_MAC_PORT_MTIP_1G10G_TX_IFOUTERRORS 0xf38 +#define A_MAC_PORT_MTIP_1G10G_TX_IFOUTERRORSHI 0xf3c +#define A_MAC_PORT_MTIP_1G10G_TX_IFUCASTPKTS 0xf40 +#define A_MAC_PORT_MTIP_1G10G_TX_IFUCASTPKTSHI 0xf44 +#define A_MAC_PORT_MTIP_1G10G_TX_IFMULTICASTPKTS 0xf48 +#define A_MAC_PORT_MTIP_1G10G_TX_IFMULTICASTPKTSHI 0xf4c +#define A_MAC_PORT_MTIP_1G10G_TX_IFBROADCASTPKTS 0xf50 +#define A_MAC_PORT_MTIP_1G10G_TX_IFBROADCASTPKTSHI 0xf54 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSDROPEVENTS 0xf58 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSDROPEVENTSHI 0xf5c +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS 0xf60 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTSHI 0xf64 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSUNDERSIZEPKTS 0xf68 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSUNDERSIZEPKTSHI 0xf6c +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS64OCTETS 0xf70 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS64OCTETSHI 0xf74 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS65TO127OCTETS 0xf78 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS65TO127OCTETSHI 0xf7c +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS128TO255OCTETS 0xf80 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS128TO255OCTETSHI 0xf84 +#define 
A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS256TO511OCTETS 0xf88 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS256TO511OCTETSHI 0xf8c +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS512TO1023OCTETS 0xf90 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS512TO1023OCTETSHI 0xf94 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS1024TO1518OCTETS 0xf98 +#define A_MAC_PORT_MTIP_1G10G_TX_ETHERSTATSPKTS1024TO1518OCTETSHI 0xf9c +#define A_MAC_PORT_MTIP_1G10G_ETHERSTATSPKTS1519TOTX_MTU 0xfa0 +#define A_MAC_PORT_MTIP_1G10G_ETHERSTATSPKTS1519TOTX_MTUHI 0xfa4 +#define A_MAC_PORT_MTIP_1G10G_TX_AMACCONTROLFRAMES 0xfc0 +#define A_MAC_PORT_MTIP_1G10G_TX_AMACCONTROLFRAMESHI 0xfc4 +#define A_MAC_PORT_MTIP_1G10G_IF_MODE 0x1000 + +#define S_MII_ENA_10 4 +#define V_MII_ENA_10(x) ((x) << S_MII_ENA_10) +#define F_MII_ENA_10 V_MII_ENA_10(1U) + +#define S_IF_MODE 0 +#define M_IF_MODE 0x3U +#define V_IF_MODE(x) ((x) << S_IF_MODE) +#define G_IF_MODE(x) (((x) >> S_IF_MODE) & M_IF_MODE) + +#define A_MAC_PORT_MTIP_1G10G_IF_STATUS 0x1004 + +#define S_IF_STATUS_MODE 0 +#define M_IF_STATUS_MODE 0x3U +#define V_IF_STATUS_MODE(x) ((x) << S_IF_STATUS_MODE) +#define G_IF_STATUS_MODE(x) (((x) >> S_IF_STATUS_MODE) & M_IF_STATUS_MODE) + +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_0 0x1080 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_0HI 0x1084 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_1 0x1088 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_1HI 0x108c +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_2 0x1090 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_2HI 0x1094 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_3 0x1098 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_3HI 0x109c +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_4 0x10a0 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_4HI 0x10a4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_5 0x10a8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_5HI 0x10ac +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_6 0x10b0 +#define 
A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_6HI 0x10b4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_7 0x10b8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESRECEIVED_7HI 0x10bc +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_0 0x10c0 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_0HI 0x10c4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_1 0x10c8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_1HI 0x10cc +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_2 0x10d0 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_2HI 0x10d4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_3 0x10d8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_3HI 0x10dc +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_4 0x10e0 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_4HI 0x10e4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_5 0x10e8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_5HI 0x10ec +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_6 0x10f0 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_6HI 0x10f4 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_7 0x10f8 +#define A_MAC_PORT_MTIP_1G10G_PFCFRAMESTRANSMITTED_7HI 0x10fc #define A_MAC_PORT_MTIP_ACT_CTL_SEG 0x1200 #define S_ACTIVE 0 @@ -35112,6 +48307,28 @@ #define V_ACTIVE(x) ((x) << S_ACTIVE) #define G_ACTIVE(x) (((x) >> S_ACTIVE) & M_ACTIVE) +#define A_T6_MAC_PORT_MTIP_SGMII_CONTROL 0x1200 + +#define S_SPEED_SEL 13 +#define V_SPEED_SEL(x) ((x) << S_SPEED_SEL) +#define F_SPEED_SEL V_SPEED_SEL(1U) + +#define S_PWR_DWN 11 +#define V_PWR_DWN(x) ((x) << S_PWR_DWN) +#define F_PWR_DWN V_PWR_DWN(1U) + +#define S_DUPLEX_MODE 8 +#define V_DUPLEX_MODE(x) ((x) << S_DUPLEX_MODE) +#define F_DUPLEX_MODE V_DUPLEX_MODE(1U) + +#define S_COLLISION_TEST 7 +#define V_COLLISION_TEST(x) ((x) << S_COLLISION_TEST) +#define F_COLLISION_TEST V_COLLISION_TEST(1U) + +#define S_T6_SPEED_SEL1 6 +#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1) +#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U) + #define 
A_MAC_PORT_MTIP_MODE_CTL_SEG 0x1204 #define S_MODE_CTL 0 @@ -35119,6 +48336,12 @@ #define V_MODE_CTL(x) ((x) << S_MODE_CTL) #define G_MODE_CTL(x) (((x) >> S_MODE_CTL) & M_MODE_CTL) +#define A_T6_MAC_PORT_MTIP_SGMII_STATUS 0x1204 + +#define S_T6_REM_FAULT 4 +#define V_T6_REM_FAULT(x) ((x) << S_T6_REM_FAULT) +#define F_T6_REM_FAULT V_T6_REM_FAULT(1U) + #define A_MAC_PORT_MTIP_TXCLK_CTL_SEG 0x1208 #define S_TXCLK_CTL 0 @@ -35126,7 +48349,28 @@ #define V_TXCLK_CTL(x) ((x) << S_TXCLK_CTL) #define G_TXCLK_CTL(x) (((x) >> S_TXCLK_CTL) & M_TXCLK_CTL) +#define A_T6_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_0 0x1208 #define A_MAC_PORT_MTIP_TX_PRMBL_CTL_SEG 0x120c +#define A_T6_MAC_PORT_MTIP_SGMII_PHY_IDENTIFIER_1 0x120c +#define A_T6_MAC_PORT_MTIP_SGMII_DEV_ABILITY 0x1210 +#define A_T6_MAC_PORT_MTIP_SGMII_PARTNER_ABILITY 0x1214 +#define A_T6_MAC_PORT_MTIP_SGMII_AN_EXPANSION 0x1218 + +#define S_NEXT_PAGE_ABLE 2 +#define V_NEXT_PAGE_ABLE(x) ((x) << S_NEXT_PAGE_ABLE) +#define F_NEXT_PAGE_ABLE V_NEXT_PAGE_ABLE(1U) + +#define S_PAGE_RECEIVE 1 +#define V_PAGE_RECEIVE(x) ((x) << S_PAGE_RECEIVE) +#define F_PAGE_RECEIVE V_PAGE_RECEIVE(1U) + +#define A_MAC_PORT_MTIP_SGMII_NP_TX 0x121c + +#define S_NP_TX 0 +#define M_NP_TX 0xffffU +#define V_NP_TX(x) ((x) << S_NP_TX) +#define G_NP_TX(x) (((x) >> S_NP_TX) & M_NP_TX) + #define A_MAC_PORT_MTIP_WAN_RS_COL_CNT 0x1220 #define S_COL_CNT 0 @@ -35134,12 +48378,541 @@ #define V_COL_CNT(x) ((x) << S_COL_CNT) #define G_COL_CNT(x) (((x) >> S_COL_CNT) & M_COL_CNT) +#define A_MAC_PORT_MTIP_SGMII_LP_NP_RX 0x1220 + +#define S_LP_NP_RX 0 +#define M_LP_NP_RX 0xffffU +#define V_LP_NP_RX(x) ((x) << S_LP_NP_RX) +#define G_LP_NP_RX(x) (((x) >> S_LP_NP_RX) & M_LP_NP_RX) + +#define A_T6_MAC_PORT_MTIP_SGMII_EXTENDED_STATUS 0x123c + +#define S_EXTENDED_STATUS 0 +#define M_EXTENDED_STATUS 0xffffU +#define V_EXTENDED_STATUS(x) ((x) << S_EXTENDED_STATUS) +#define G_EXTENDED_STATUS(x) (((x) >> S_EXTENDED_STATUS) & M_EXTENDED_STATUS) + #define A_MAC_PORT_MTIP_VL_INTVL 
0x1240 #define S_VL_INTVL 1 #define V_VL_INTVL(x) ((x) << S_VL_INTVL) #define F_VL_INTVL V_VL_INTVL(1U) +#define A_MAC_PORT_MTIP_SGMII_SCRATCH 0x1240 + +#define S_SCRATCH 0 +#define M_SCRATCH 0xffffU +#define V_SCRATCH(x) ((x) << S_SCRATCH) +#define G_SCRATCH(x) (((x) >> S_SCRATCH) & M_SCRATCH) + +#define A_MAC_PORT_MTIP_SGMII_REV 0x1244 + +#define S_SGMII_VER 8 +#define M_SGMII_VER 0xffU +#define V_SGMII_VER(x) ((x) << S_SGMII_VER) +#define G_SGMII_VER(x) (((x) >> S_SGMII_VER) & M_SGMII_VER) + +#define S_SGMII_REV 0 +#define M_SGMII_REV 0xffU +#define V_SGMII_REV(x) ((x) << S_SGMII_REV) +#define G_SGMII_REV(x) (((x) >> S_SGMII_REV) & M_SGMII_REV) + +#define A_T6_MAC_PORT_MTIP_SGMII_LINK_TIMER_LO 0x1248 + +#define S_LINK_TIMER_LO 0 +#define M_LINK_TIMER_LO 0xffffU +#define V_LINK_TIMER_LO(x) ((x) << S_LINK_TIMER_LO) +#define G_LINK_TIMER_LO(x) (((x) >> S_LINK_TIMER_LO) & M_LINK_TIMER_LO) + +#define A_T6_MAC_PORT_MTIP_SGMII_LINK_TIMER_HI 0x124c + +#define S_LINK_TIMER_HI 0 +#define M_LINK_TIMER_HI 0xffffU +#define V_LINK_TIMER_HI(x) ((x) << S_LINK_TIMER_HI) +#define G_LINK_TIMER_HI(x) (((x) >> S_LINK_TIMER_HI) & M_LINK_TIMER_HI) + +#define A_T6_MAC_PORT_MTIP_SGMII_IF_MODE 0x1250 + +#define S_SGMII_DUPLEX 4 +#define V_SGMII_DUPLEX(x) ((x) << S_SGMII_DUPLEX) +#define F_SGMII_DUPLEX V_SGMII_DUPLEX(1U) + +#define A_MAC_PORT_MTIP_SGMII_DECODE_ERROR 0x1254 + +#define S_T6_DECODE_ERROR 0 +#define M_T6_DECODE_ERROR 0xffffU +#define V_T6_DECODE_ERROR(x) ((x) << S_T6_DECODE_ERROR) +#define G_T6_DECODE_ERROR(x) (((x) >> S_T6_DECODE_ERROR) & M_T6_DECODE_ERROR) + +#define A_MAC_PORT_MTIP_KR_PCS_CONTROL_1 0x1300 + +#define S_LOW_POWER 11 +#define V_LOW_POWER(x) ((x) << S_LOW_POWER) +#define F_LOW_POWER V_LOW_POWER(1U) + +#define S_T6_SPEED_SEL1 6 +#define V_T6_SPEED_SEL1(x) ((x) << S_T6_SPEED_SEL1) +#define F_T6_SPEED_SEL1 V_T6_SPEED_SEL1(1U) + +#define S_SPEED_SEL2 2 +#define M_SPEED_SEL2 0xfU +#define V_SPEED_SEL2(x) ((x) << S_SPEED_SEL2) +#define G_SPEED_SEL2(x) (((x) >> 
S_SPEED_SEL2) & M_SPEED_SEL2) + +#define A_MAC_PORT_MTIP_KR_PCS_STATUS_1 0x1304 + +#define S_TX_LPI 11 +#define V_TX_LPI(x) ((x) << S_TX_LPI) +#define F_TX_LPI V_TX_LPI(1U) + +#define S_RX_LPI 10 +#define V_RX_LPI(x) ((x) << S_RX_LPI) +#define F_RX_LPI V_RX_LPI(1U) + +#define S_TX_LPI_ACTIVE 9 +#define V_TX_LPI_ACTIVE(x) ((x) << S_TX_LPI_ACTIVE) +#define F_TX_LPI_ACTIVE V_TX_LPI_ACTIVE(1U) + +#define S_RX_LPI_ACTIVE 8 +#define V_RX_LPI_ACTIVE(x) ((x) << S_RX_LPI_ACTIVE) +#define F_RX_LPI_ACTIVE V_RX_LPI_ACTIVE(1U) + +#define S_FAULT 7 +#define V_FAULT(x) ((x) << S_FAULT) +#define F_FAULT V_FAULT(1U) + +#define S_PCS_RX_LINK_STAT 2 +#define V_PCS_RX_LINK_STAT(x) ((x) << S_PCS_RX_LINK_STAT) +#define F_PCS_RX_LINK_STAT V_PCS_RX_LINK_STAT(1U) + +#define S_LOW_POWER_ABILITY 1 +#define V_LOW_POWER_ABILITY(x) ((x) << S_LOW_POWER_ABILITY) +#define F_LOW_POWER_ABILITY V_LOW_POWER_ABILITY(1U) + +#define A_MAC_PORT_MTIP_KR_PCS_DEVICE_IDENTIFIER_1 0x1308 +#define A_MAC_PORT_MTIP_KR_PCS_DEVICE_IDENTIFIER_2 0x130c +#define A_MAC_PORT_MTIP_KR_PCS_SPEED_ABILITY 0x1310 + +#define S_10G_CAPABLE 0 +#define V_10G_CAPABLE(x) ((x) << S_10G_CAPABLE) +#define F_10G_CAPABLE V_10G_CAPABLE(1U) + +#define A_MAC_PORT_MTIP_KR_PCS_DEVICES_IN_PACKAGELO 0x1314 + +#define S_AUTO_NEGOTIATION_PRESENT 7 +#define V_AUTO_NEGOTIATION_PRESENT(x) ((x) << S_AUTO_NEGOTIATION_PRESENT) +#define F_AUTO_NEGOTIATION_PRESENT V_AUTO_NEGOTIATION_PRESENT(1U) + +#define S_DTE_XS_PRESENT 5 +#define V_DTE_XS_PRESENT(x) ((x) << S_DTE_XS_PRESENT) +#define F_DTE_XS_PRESENT V_DTE_XS_PRESENT(1U) + +#define S_PHY_XS_PRESENT 4 +#define V_PHY_XS_PRESENT(x) ((x) << S_PHY_XS_PRESENT) +#define F_PHY_XS_PRESENT V_PHY_XS_PRESENT(1U) + +#define S_PCS_PRESENT 3 +#define V_PCS_PRESENT(x) ((x) << S_PCS_PRESENT) +#define F_PCS_PRESENT V_PCS_PRESENT(1U) + +#define S_WIS_PRESENT 2 +#define V_WIS_PRESENT(x) ((x) << S_WIS_PRESENT) +#define F_WIS_PRESENT V_WIS_PRESENT(1U) + +#define S_PMD_PMA_PRESENT 1 +#define V_PMD_PMA_PRESENT(x) ((x) << 
S_PMD_PMA_PRESENT) +#define F_PMD_PMA_PRESENT V_PMD_PMA_PRESENT(1U) + +#define S_CLAUSE_22_REG_PRESENT 0 +#define V_CLAUSE_22_REG_PRESENT(x) ((x) << S_CLAUSE_22_REG_PRESENT) +#define F_CLAUSE_22_REG_PRESENT V_CLAUSE_22_REG_PRESENT(1U) + +#define A_MAC_PORT_MTIP_KR_PCS_DEVICES_IN_PACKAGEHI 0x1318 +#define A_MAC_PORT_MTIP_KR_PCS_CONTROL_2 0x131c + +#define S_PCS_TYPE_SELECTION 0 +#define M_PCS_TYPE_SELECTION 0x3U +#define V_PCS_TYPE_SELECTION(x) ((x) << S_PCS_TYPE_SELECTION) +#define G_PCS_TYPE_SELECTION(x) (((x) >> S_PCS_TYPE_SELECTION) & M_PCS_TYPE_SELECTION) + +#define A_MAC_PORT_MTIP_KR_PCS_STATUS_2 0x1320 + +#define S_DEVICE_PRESENT 14 +#define M_DEVICE_PRESENT 0x3U +#define V_DEVICE_PRESENT(x) ((x) << S_DEVICE_PRESENT) +#define G_DEVICE_PRESENT(x) (((x) >> S_DEVICE_PRESENT) & M_DEVICE_PRESENT) + +#define S_TRANSMIT_FAULT 11 +#define V_TRANSMIT_FAULT(x) ((x) << S_TRANSMIT_FAULT) +#define F_TRANSMIT_FAULT V_TRANSMIT_FAULT(1U) + +#define S_RECEIVE_FAULT 10 +#define V_RECEIVE_FAULT(x) ((x) << S_RECEIVE_FAULT) +#define F_RECEIVE_FAULT V_RECEIVE_FAULT(1U) + +#define S_10GBASE_W_CAPABLE 2 +#define V_10GBASE_W_CAPABLE(x) ((x) << S_10GBASE_W_CAPABLE) +#define F_10GBASE_W_CAPABLE V_10GBASE_W_CAPABLE(1U) + +#define S_10GBASE_X_CAPABLE 1 +#define V_10GBASE_X_CAPABLE(x) ((x) << S_10GBASE_X_CAPABLE) +#define F_10GBASE_X_CAPABLE V_10GBASE_X_CAPABLE(1U) + +#define S_10GBASE_R_CAPABLE 0 +#define V_10GBASE_R_CAPABLE(x) ((x) << S_10GBASE_R_CAPABLE) +#define F_10GBASE_R_CAPABLE V_10GBASE_R_CAPABLE(1U) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_PACKAGE_IDENTIFIER_LO 0x1338 + +#define S_PCS_PACKAGE_IDENTIFIER_LO 0 +#define M_PCS_PACKAGE_IDENTIFIER_LO 0xffffU +#define V_PCS_PACKAGE_IDENTIFIER_LO(x) ((x) << S_PCS_PACKAGE_IDENTIFIER_LO) +#define G_PCS_PACKAGE_IDENTIFIER_LO(x) (((x) >> S_PCS_PACKAGE_IDENTIFIER_LO) & M_PCS_PACKAGE_IDENTIFIER_LO) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_PACKAGE_IDENTIFIER_HI 0x133c + +#define S_PCS_PACKAGE_IDENTIFIER_HI 0 +#define 
M_PCS_PACKAGE_IDENTIFIER_HI 0xffffU +#define V_PCS_PACKAGE_IDENTIFIER_HI(x) ((x) << S_PCS_PACKAGE_IDENTIFIER_HI) +#define G_PCS_PACKAGE_IDENTIFIER_HI(x) (((x) >> S_PCS_PACKAGE_IDENTIFIER_HI) & M_PCS_PACKAGE_IDENTIFIER_HI) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_STATUS_1 0x1380 + +#define S_10GBASE_R_RX_LINK_STATUS 12 +#define V_10GBASE_R_RX_LINK_STATUS(x) ((x) << S_10GBASE_R_RX_LINK_STATUS) +#define F_10GBASE_R_RX_LINK_STATUS V_10GBASE_R_RX_LINK_STATUS(1U) + +#define S_PRBS9_PTTRN_TSTNG_ABILITY 3 +#define V_PRBS9_PTTRN_TSTNG_ABILITY(x) ((x) << S_PRBS9_PTTRN_TSTNG_ABILITY) +#define F_PRBS9_PTTRN_TSTNG_ABILITY V_PRBS9_PTTRN_TSTNG_ABILITY(1U) + +#define S_PRBS31_PTTRN_TSTNG_ABILITY 2 +#define V_PRBS31_PTTRN_TSTNG_ABILITY(x) ((x) << S_PRBS31_PTTRN_TSTNG_ABILITY) +#define F_PRBS31_PTTRN_TSTNG_ABILITY V_PRBS31_PTTRN_TSTNG_ABILITY(1U) + +#define S_10GBASE_R_PCS_HIGH_BER 1 +#define V_10GBASE_R_PCS_HIGH_BER(x) ((x) << S_10GBASE_R_PCS_HIGH_BER) +#define F_10GBASE_R_PCS_HIGH_BER V_10GBASE_R_PCS_HIGH_BER(1U) + +#define S_10GBASE_R_PCS_BLOCK_LOCK 0 +#define V_10GBASE_R_PCS_BLOCK_LOCK(x) ((x) << S_10GBASE_R_PCS_BLOCK_LOCK) +#define F_10GBASE_R_PCS_BLOCK_LOCK V_10GBASE_R_PCS_BLOCK_LOCK(1U) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_STATUS_2 0x1384 + +#define S_LATCHED_BLOCK_LOCK 15 +#define V_LATCHED_BLOCK_LOCK(x) ((x) << S_LATCHED_BLOCK_LOCK) +#define F_LATCHED_BLOCK_LOCK V_LATCHED_BLOCK_LOCK(1U) + +#define S_LATCHED_HIGH_BER 14 +#define V_LATCHED_HIGH_BER(x) ((x) << S_LATCHED_HIGH_BER) +#define F_LATCHED_HIGH_BER V_LATCHED_HIGH_BER(1U) + +#define S_BERBER_COUNTER 8 +#define M_BERBER_COUNTER 0x3fU +#define V_BERBER_COUNTER(x) ((x) << S_BERBER_COUNTER) +#define G_BERBER_COUNTER(x) (((x) >> S_BERBER_COUNTER) & M_BERBER_COUNTER) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_0 0x1388 + +#define S_TEST_PATTERN_SEED_A0 0 +#define M_TEST_PATTERN_SEED_A0 0xffffU +#define V_TEST_PATTERN_SEED_A0(x) ((x) << S_TEST_PATTERN_SEED_A0) +#define G_TEST_PATTERN_SEED_A0(x) 
(((x) >> S_TEST_PATTERN_SEED_A0) & M_TEST_PATTERN_SEED_A0) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_1 0x138c + +#define S_TEST_PATTERN_SEED_A1 0 +#define M_TEST_PATTERN_SEED_A1 0xffffU +#define V_TEST_PATTERN_SEED_A1(x) ((x) << S_TEST_PATTERN_SEED_A1) +#define G_TEST_PATTERN_SEED_A1(x) (((x) >> S_TEST_PATTERN_SEED_A1) & M_TEST_PATTERN_SEED_A1) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_2 0x1390 + +#define S_TEST_PATTERN_SEED_A2 0 +#define M_TEST_PATTERN_SEED_A2 0xffffU +#define V_TEST_PATTERN_SEED_A2(x) ((x) << S_TEST_PATTERN_SEED_A2) +#define G_TEST_PATTERN_SEED_A2(x) (((x) >> S_TEST_PATTERN_SEED_A2) & M_TEST_PATTERN_SEED_A2) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_A_3 0x1394 + +#define S_TEST_PATTERN_SEED_A3 0 +#define M_TEST_PATTERN_SEED_A3 0x3ffU +#define V_TEST_PATTERN_SEED_A3(x) ((x) << S_TEST_PATTERN_SEED_A3) +#define G_TEST_PATTERN_SEED_A3(x) (((x) >> S_TEST_PATTERN_SEED_A3) & M_TEST_PATTERN_SEED_A3) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_0 0x1398 + +#define S_TEST_PATTERN_SEED_B0 0 +#define M_TEST_PATTERN_SEED_B0 0xffffU +#define V_TEST_PATTERN_SEED_B0(x) ((x) << S_TEST_PATTERN_SEED_B0) +#define G_TEST_PATTERN_SEED_B0(x) (((x) >> S_TEST_PATTERN_SEED_B0) & M_TEST_PATTERN_SEED_B0) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_1 0x139c + +#define S_TEST_PATTERN_SEED_B1 0 +#define M_TEST_PATTERN_SEED_B1 0xffffU +#define V_TEST_PATTERN_SEED_B1(x) ((x) << S_TEST_PATTERN_SEED_B1) +#define G_TEST_PATTERN_SEED_B1(x) (((x) >> S_TEST_PATTERN_SEED_B1) & M_TEST_PATTERN_SEED_B1) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_2 0x13a0 + +#define S_TEST_PATTERN_SEED_B2 0 +#define M_TEST_PATTERN_SEED_B2 0xffffU +#define V_TEST_PATTERN_SEED_B2(x) ((x) << S_TEST_PATTERN_SEED_B2) +#define G_TEST_PATTERN_SEED_B2(x) (((x) >> S_TEST_PATTERN_SEED_B2) & M_TEST_PATTERN_SEED_B2) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_SEED_B_3 0x13a4 + 
+#define S_TEST_PATTERN_SEED_B3 0 +#define M_TEST_PATTERN_SEED_B3 0x3ffU +#define V_TEST_PATTERN_SEED_B3(x) ((x) << S_TEST_PATTERN_SEED_B3) +#define G_TEST_PATTERN_SEED_B3(x) (((x) >> S_TEST_PATTERN_SEED_B3) & M_TEST_PATTERN_SEED_B3) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_CONTROL 0x13a8 + +#define S_PRBS9_TX_TST_PTTRN_EN 6 +#define V_PRBS9_TX_TST_PTTRN_EN(x) ((x) << S_PRBS9_TX_TST_PTTRN_EN) +#define F_PRBS9_TX_TST_PTTRN_EN V_PRBS9_TX_TST_PTTRN_EN(1U) + +#define S_PRBS31_RX_TST_PTTRN_EN 5 +#define V_PRBS31_RX_TST_PTTRN_EN(x) ((x) << S_PRBS31_RX_TST_PTTRN_EN) +#define F_PRBS31_RX_TST_PTTRN_EN V_PRBS31_RX_TST_PTTRN_EN(1U) + +#define S_PRBS31_TX_TST_PTTRN_EN 4 +#define V_PRBS31_TX_TST_PTTRN_EN(x) ((x) << S_PRBS31_TX_TST_PTTRN_EN) +#define F_PRBS31_TX_TST_PTTRN_EN V_PRBS31_TX_TST_PTTRN_EN(1U) + +#define S_TX_TEST_PATTERN_EN 3 +#define V_TX_TEST_PATTERN_EN(x) ((x) << S_TX_TEST_PATTERN_EN) +#define F_TX_TEST_PATTERN_EN V_TX_TEST_PATTERN_EN(1U) + +#define S_RX_TEST_PATTERN_EN 2 +#define V_RX_TEST_PATTERN_EN(x) ((x) << S_RX_TEST_PATTERN_EN) +#define F_RX_TEST_PATTERN_EN V_RX_TEST_PATTERN_EN(1U) + +#define S_TEST_PATTERN_SELECT 1 +#define V_TEST_PATTERN_SELECT(x) ((x) << S_TEST_PATTERN_SELECT) +#define F_TEST_PATTERN_SELECT V_TEST_PATTERN_SELECT(1U) + +#define S_DATA_PATTERN_SELECT 0 +#define V_DATA_PATTERN_SELECT(x) ((x) << S_DATA_PATTERN_SELECT) +#define F_DATA_PATTERN_SELECT V_DATA_PATTERN_SELECT(1U) + +#define A_MAC_PORT_MTIP_KR_10GBASE_R_PCS_TEST_PATTERN_ERROR_COUNTER 0x13ac + +#define S_TEST_PATTERN_ERR_CNTR 0 +#define M_TEST_PATTERN_ERR_CNTR 0xffffU +#define V_TEST_PATTERN_ERR_CNTR(x) ((x) << S_TEST_PATTERN_ERR_CNTR) +#define G_TEST_PATTERN_ERR_CNTR(x) (((x) >> S_TEST_PATTERN_ERR_CNTR) & M_TEST_PATTERN_ERR_CNTR) + +#define A_MAC_PORT_MTIP_KR_VENDOR_SPECIFIC_PCS_STATUS 0x13b4 + +#define S_TRANSMIT_FIFO_FAULT 1 +#define V_TRANSMIT_FIFO_FAULT(x) ((x) << S_TRANSMIT_FIFO_FAULT) +#define F_TRANSMIT_FIFO_FAULT V_TRANSMIT_FIFO_FAULT(1U) + +#define 
S_RECEIVE_FIFO_FAULT 0 +#define V_RECEIVE_FIFO_FAULT(x) ((x) << S_RECEIVE_FIFO_FAULT) +#define F_RECEIVE_FIFO_FAULT V_RECEIVE_FIFO_FAULT(1U) + +#define A_MAC_PORT_MTIP_KR4_CONTROL_1 0x1400 + +#define S_SPEED_SELECTION 13 +#define V_SPEED_SELECTION(x) ((x) << S_SPEED_SELECTION) +#define F_SPEED_SELECTION V_SPEED_SELECTION(1U) + +#define S_SPEED_SELECTION1 6 +#define V_SPEED_SELECTION1(x) ((x) << S_SPEED_SELECTION1) +#define F_SPEED_SELECTION1 V_SPEED_SELECTION1(1U) + +#define S_SPEED_SELECTION2 2 +#define M_SPEED_SELECTION2 0xfU +#define V_SPEED_SELECTION2(x) ((x) << S_SPEED_SELECTION2) +#define G_SPEED_SELECTION2(x) (((x) >> S_SPEED_SELECTION2) & M_SPEED_SELECTION2) + +#define A_MAC_PORT_MTIP_KR4_STATUS_1 0x1404 + +#define S_RECEIVE_LINK_STAT 2 +#define V_RECEIVE_LINK_STAT(x) ((x) << S_RECEIVE_LINK_STAT) +#define F_RECEIVE_LINK_STAT V_RECEIVE_LINK_STAT(1U) + +#define A_MAC_PORT_MTIP_KR4_DEVICE_ID0 0x1408 +#define A_MAC_PORT_MTIP_KR4_DEVICE_ID1 0x140c + +#define S_T6_DEVICE_ID1 16 +#define M_T6_DEVICE_ID1 0xffffU +#define V_T6_DEVICE_ID1(x) ((x) << S_T6_DEVICE_ID1) +#define G_T6_DEVICE_ID1(x) (((x) >> S_T6_DEVICE_ID1) & M_T6_DEVICE_ID1) + +#define A_MAC_PORT_MTIP_KR4_SPEED_ABILITY 0x1410 + +#define S_100G_CAPABLE 3 +#define V_100G_CAPABLE(x) ((x) << S_100G_CAPABLE) +#define F_100G_CAPABLE V_100G_CAPABLE(1U) + +#define S_40G_CAPABLE 2 +#define V_40G_CAPABLE(x) ((x) << S_40G_CAPABLE) +#define F_40G_CAPABLE V_40G_CAPABLE(1U) + +#define S_10PASS_TS_2BASE_TL_CAPABLE 1 +#define V_10PASS_TS_2BASE_TL_CAPABLE(x) ((x) << S_10PASS_TS_2BASE_TL_CAPABLE) +#define F_10PASS_TS_2BASE_TL_CAPABLE V_10PASS_TS_2BASE_TL_CAPABLE(1U) + +#define A_MAC_PORT_MTIP_KR4_DEVICES_IN_PKG1 0x1414 + +#define S_CLAUSE_22_REG 0 +#define V_CLAUSE_22_REG(x) ((x) << S_CLAUSE_22_REG) +#define F_CLAUSE_22_REG V_CLAUSE_22_REG(1U) + +#define A_MAC_PORT_MTIP_KR4_DEVICES_IN_PKG2 0x1418 + +#define S_VENDOR_SPECIFIC_DEVICE 15 +#define V_VENDOR_SPECIFIC_DEVICE(x) ((x) << S_VENDOR_SPECIFIC_DEVICE) +#define 
F_VENDOR_SPECIFIC_DEVICE V_VENDOR_SPECIFIC_DEVICE(1U) + +#define S_VENDOR_SPECIFIC_DEVICE1 14 +#define V_VENDOR_SPECIFIC_DEVICE1(x) ((x) << S_VENDOR_SPECIFIC_DEVICE1) +#define F_VENDOR_SPECIFIC_DEVICE1 V_VENDOR_SPECIFIC_DEVICE1(1U) + +#define S_CLAUSE_22_EXT 13 +#define V_CLAUSE_22_EXT(x) ((x) << S_CLAUSE_22_EXT) +#define F_CLAUSE_22_EXT V_CLAUSE_22_EXT(1U) + +#define A_MAC_PORT_MTIP_KR4_CONTROL_2 0x141c + +#define S_PCS_TYPE_SEL 0 +#define M_PCS_TYPE_SEL 0x7U +#define V_PCS_TYPE_SEL(x) ((x) << S_PCS_TYPE_SEL) +#define G_PCS_TYPE_SEL(x) (((x) >> S_PCS_TYPE_SEL) & M_PCS_TYPE_SEL) + +#define A_MAC_PORT_MTIP_KR4_STATUS_2 0x1420 + +#define S_100GBASE_R_CAPABLE 5 +#define V_100GBASE_R_CAPABLE(x) ((x) << S_100GBASE_R_CAPABLE) +#define F_100GBASE_R_CAPABLE V_100GBASE_R_CAPABLE(1U) + +#define S_40GBASE_R_CAPABLE 4 +#define V_40GBASE_R_CAPABLE(x) ((x) << S_40GBASE_R_CAPABLE) +#define F_40GBASE_R_CAPABLE V_40GBASE_R_CAPABLE(1U) + +#define S_10GBASE_T_CAPABLE 3 +#define V_10GBASE_T_CAPABLE(x) ((x) << S_10GBASE_T_CAPABLE) +#define F_10GBASE_T_CAPABLE V_10GBASE_T_CAPABLE(1U) + +#define A_MAC_PORT_MTIP_KR4_PKG_ID0 0x1438 +#define A_MAC_PORT_MTIP_KR4_PKG_ID1 0x143c +#define A_MAC_PORT_MTIP_KR4_BASE_R_STATUS_1 0x1480 + +#define S_T6_RX_LINK_STATUS 12 +#define V_T6_RX_LINK_STATUS(x) ((x) << S_T6_RX_LINK_STATUS) +#define F_T6_RX_LINK_STATUS V_T6_RX_LINK_STATUS(1U) + +#define S_HIGH_BER 1 +#define V_HIGH_BER(x) ((x) << S_HIGH_BER) +#define F_HIGH_BER V_HIGH_BER(1U) + +#define S_KR4_BLOCK_LOCK 0 +#define V_KR4_BLOCK_LOCK(x) ((x) << S_KR4_BLOCK_LOCK) +#define F_KR4_BLOCK_LOCK V_KR4_BLOCK_LOCK(1U) + +#define A_MAC_PORT_MTIP_KR4_BASE_R_STATUS_2 0x1484 + +#define S_LATCHED_BL_LK 15 +#define V_LATCHED_BL_LK(x) ((x) << S_LATCHED_BL_LK) +#define F_LATCHED_BL_LK V_LATCHED_BL_LK(1U) + +#define S_LATCHED_HG_BR 14 +#define V_LATCHED_HG_BR(x) ((x) << S_LATCHED_HG_BR) +#define F_LATCHED_HG_BR V_LATCHED_HG_BR(1U) + +#define S_BER_CNT 8 +#define M_BER_CNT 0x3fU +#define V_BER_CNT(x) ((x) << 
S_BER_CNT) +#define G_BER_CNT(x) (((x) >> S_BER_CNT) & M_BER_CNT) + +#define S_ERR_BL_CNT 0 +#define M_ERR_BL_CNT 0xffU +#define V_ERR_BL_CNT(x) ((x) << S_ERR_BL_CNT) +#define G_ERR_BL_CNT(x) (((x) >> S_ERR_BL_CNT) & M_ERR_BL_CNT) + +#define A_MAC_PORT_MTIP_KR4_BASE_R_TEST_CONTROL 0x14a8 + +#define S_TX_TP_EN 3 +#define V_TX_TP_EN(x) ((x) << S_TX_TP_EN) +#define F_TX_TP_EN V_TX_TP_EN(1U) + +#define S_RX_TP_EN 2 +#define V_RX_TP_EN(x) ((x) << S_RX_TP_EN) +#define F_RX_TP_EN V_RX_TP_EN(1U) + +#define A_MAC_PORT_MTIP_KR4_BASE_R_TEST_ERR_CNT 0x14ac + +#define S_TP_ERR_CNTR 0 +#define M_TP_ERR_CNTR 0xffffU +#define V_TP_ERR_CNTR(x) ((x) << S_TP_ERR_CNTR) +#define G_TP_ERR_CNTR(x) (((x) >> S_TP_ERR_CNTR) & M_TP_ERR_CNTR) + +#define A_MAC_PORT_MTIP_KR4_BER_HIGH_ORDER_CNT 0x14b0 + +#define S_BER_HI_ORDER_CNT 0 +#define M_BER_HI_ORDER_CNT 0xffffU +#define V_BER_HI_ORDER_CNT(x) ((x) << S_BER_HI_ORDER_CNT) +#define G_BER_HI_ORDER_CNT(x) (((x) >> S_BER_HI_ORDER_CNT) & M_BER_HI_ORDER_CNT) + +#define A_MAC_PORT_MTIP_KR4_ERR_BLK_HIGH_ORDER_CNT 0x14b4 + +#define S_HI_ORDER_CNT_EN 15 +#define V_HI_ORDER_CNT_EN(x) ((x) << S_HI_ORDER_CNT_EN) +#define F_HI_ORDER_CNT_EN V_HI_ORDER_CNT_EN(1U) + +#define S_ERR_BLK_CNTR 0 +#define M_ERR_BLK_CNTR 0x3fffU +#define V_ERR_BLK_CNTR(x) ((x) << S_ERR_BLK_CNTR) +#define G_ERR_BLK_CNTR(x) (((x) >> S_ERR_BLK_CNTR) & M_ERR_BLK_CNTR) + +#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_1 0x14c8 + +#define S_LANE_ALIGN_STATUS 12 +#define V_LANE_ALIGN_STATUS(x) ((x) << S_LANE_ALIGN_STATUS) +#define F_LANE_ALIGN_STATUS V_LANE_ALIGN_STATUS(1U) + +#define S_LANE_3_BLK_LCK 3 +#define V_LANE_3_BLK_LCK(x) ((x) << S_LANE_3_BLK_LCK) +#define F_LANE_3_BLK_LCK V_LANE_3_BLK_LCK(1U) + +#define S_LANE_2_BLK_LC32_6431K 2 +#define V_LANE_2_BLK_LC32_6431K(x) ((x) << S_LANE_2_BLK_LC32_6431K) +#define F_LANE_2_BLK_LC32_6431K V_LANE_2_BLK_LC32_6431K(1U) + +#define S_LANE_1_BLK_LCK 1 +#define V_LANE_1_BLK_LCK(x) ((x) << S_LANE_1_BLK_LCK) +#define F_LANE_1_BLK_LCK 
V_LANE_1_BLK_LCK(1U) + +#define S_LANE_0_BLK_LCK 0 +#define V_LANE_0_BLK_LCK(x) ((x) << S_LANE_0_BLK_LCK) +#define F_LANE_0_BLK_LCK V_LANE_0_BLK_LCK(1U) + +#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_2 0x14cc +#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_3 0x14d0 + +#define S_LANE_3_ALIGN_MRKR_LCK 3 +#define V_LANE_3_ALIGN_MRKR_LCK(x) ((x) << S_LANE_3_ALIGN_MRKR_LCK) +#define F_LANE_3_ALIGN_MRKR_LCK V_LANE_3_ALIGN_MRKR_LCK(1U) + +#define S_LANE_2_ALIGN_MRKR_LCK 2 +#define V_LANE_2_ALIGN_MRKR_LCK(x) ((x) << S_LANE_2_ALIGN_MRKR_LCK) +#define F_LANE_2_ALIGN_MRKR_LCK V_LANE_2_ALIGN_MRKR_LCK(1U) + +#define S_LANE_1_ALIGN_MRKR_LCK 1 +#define V_LANE_1_ALIGN_MRKR_LCK(x) ((x) << S_LANE_1_ALIGN_MRKR_LCK) +#define F_LANE_1_ALIGN_MRKR_LCK V_LANE_1_ALIGN_MRKR_LCK(1U) + +#define S_LANE_0_ALIGN_MRKR_LCK 0 +#define V_LANE_0_ALIGN_MRKR_LCK(x) ((x) << S_LANE_0_ALIGN_MRKR_LCK) +#define F_LANE_0_ALIGN_MRKR_LCK V_LANE_0_ALIGN_MRKR_LCK(1U) + +#define A_MAC_PORT_MTIP_KR4_MULTI_LANE_ALIGN_STATUS_4 0x14d4 #define A_MAC_PORT_MTIP_MDIO_CFG_STATUS 0x1600 #define S_CLK_DIV 7 @@ -35206,14 +48979,40 @@ #define V_MDIO_ADDR(x) ((x) << S_MDIO_ADDR) #define G_MDIO_ADDR(x) (((x) >> S_MDIO_ADDR) & M_MDIO_ADDR) +#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_0 0x1720 + +#define S_BIP_ERR_CNT_LANE_0 0 +#define M_BIP_ERR_CNT_LANE_0 0xffffU +#define V_BIP_ERR_CNT_LANE_0(x) ((x) << S_BIP_ERR_CNT_LANE_0) +#define G_BIP_ERR_CNT_LANE_0(x) (((x) >> S_BIP_ERR_CNT_LANE_0) & M_BIP_ERR_CNT_LANE_0) + +#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_1 0x1724 + +#define S_BIP_ERR_CNT_LANE_1 0 +#define M_BIP_ERR_CNT_LANE_1 0xffffU +#define V_BIP_ERR_CNT_LANE_1(x) ((x) << S_BIP_ERR_CNT_LANE_1) +#define G_BIP_ERR_CNT_LANE_1(x) (((x) >> S_BIP_ERR_CNT_LANE_1) & M_BIP_ERR_CNT_LANE_1) + +#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_2 0x1728 + +#define S_BIP_ERR_CNT_LANE_2 0 +#define M_BIP_ERR_CNT_LANE_2 0xffffU +#define V_BIP_ERR_CNT_LANE_2(x) ((x) << S_BIP_ERR_CNT_LANE_2) +#define G_BIP_ERR_CNT_LANE_2(x) (((x) >> 
S_BIP_ERR_CNT_LANE_2) & M_BIP_ERR_CNT_LANE_2) + +#define A_MAC_PORT_MTIP_KR4_BIP_ERR_CNT_LANE_3 0x172c + +#define S_BIP_ERR_CNT_LANE_3 0 +#define M_BIP_ERR_CNT_LANE_3 0xffffU +#define V_BIP_ERR_CNT_LANE_3(x) ((x) << S_BIP_ERR_CNT_LANE_3) +#define G_BIP_ERR_CNT_LANE_3(x) (((x) >> S_BIP_ERR_CNT_LANE_3) & M_BIP_ERR_CNT_LANE_3) + #define A_MAC_PORT_MTIP_VLAN_TPID_0 0x1a00 -#if 0 /* M_VLANTAG collides with M_VLANTAG in sys/mbuf.h */ #define S_VLANTAG 0 -#define M_VLANTAG 0xffffU +#define CXGBE_M_VLANTAG 0xffffU #define V_VLANTAG(x) ((x) << S_VLANTAG) -#define G_VLANTAG(x) (((x) >> S_VLANTAG) & M_VLANTAG) -#endif +#define G_VLANTAG(x) (((x) >> S_VLANTAG) & CXGBE_M_VLANTAG) #define A_MAC_PORT_MTIP_VLAN_TPID_1 0x1a04 #define A_MAC_PORT_MTIP_VLAN_TPID_2 0x1a08 @@ -35222,6 +49021,329 @@ #define A_MAC_PORT_MTIP_VLAN_TPID_5 0x1a14 #define A_MAC_PORT_MTIP_VLAN_TPID_6 0x1a18 #define A_MAC_PORT_MTIP_VLAN_TPID_7 0x1a1c +#define A_MAC_PORT_MTIP_KR4_LANE_0_MAPPING 0x1a40 + +#define S_KR4_LANE_0_MAPPING 0 +#define M_KR4_LANE_0_MAPPING 0x3U +#define V_KR4_LANE_0_MAPPING(x) ((x) << S_KR4_LANE_0_MAPPING) +#define G_KR4_LANE_0_MAPPING(x) (((x) >> S_KR4_LANE_0_MAPPING) & M_KR4_LANE_0_MAPPING) + +#define A_MAC_PORT_MTIP_KR4_LANE_1_MAPPING 0x1a44 + +#define S_KR4_LANE_1_MAPPING 0 +#define M_KR4_LANE_1_MAPPING 0x3U +#define V_KR4_LANE_1_MAPPING(x) ((x) << S_KR4_LANE_1_MAPPING) +#define G_KR4_LANE_1_MAPPING(x) (((x) >> S_KR4_LANE_1_MAPPING) & M_KR4_LANE_1_MAPPING) + +#define A_MAC_PORT_MTIP_KR4_LANE_2_MAPPING 0x1a48 + +#define S_KR4_LANE_2_MAPPING 0 +#define M_KR4_LANE_2_MAPPING 0x3U +#define V_KR4_LANE_2_MAPPING(x) ((x) << S_KR4_LANE_2_MAPPING) +#define G_KR4_LANE_2_MAPPING(x) (((x) >> S_KR4_LANE_2_MAPPING) & M_KR4_LANE_2_MAPPING) + +#define A_MAC_PORT_MTIP_KR4_LANE_3_MAPPING 0x1a4c + +#define S_KR4_LANE_3_MAPPING 0 +#define M_KR4_LANE_3_MAPPING 0x3U +#define V_KR4_LANE_3_MAPPING(x) ((x) << S_KR4_LANE_3_MAPPING) +#define G_KR4_LANE_3_MAPPING(x) (((x) >> S_KR4_LANE_3_MAPPING) & 
M_KR4_LANE_3_MAPPING) + +#define A_MAC_PORT_MTIP_KR4_SCRATCH 0x1af0 +#define A_MAC_PORT_MTIP_KR4_CORE_REVISION 0x1af4 +#define A_MAC_PORT_MTIP_KR4_VL_INTVL 0x1af8 + +#define S_SHRT_MRKR_CNFG 0 +#define V_SHRT_MRKR_CNFG(x) ((x) << S_SHRT_MRKR_CNFG) +#define F_SHRT_MRKR_CNFG V_SHRT_MRKR_CNFG(1U) + +#define A_MAC_PORT_MTIP_KR4_TX_LANE_THRESH 0x1afc +#define A_MAC_PORT_MTIP_CR4_CONTROL_1 0x1b00 +#define A_MAC_PORT_MTIP_CR4_STATUS_1 0x1b04 + +#define S_CR4_RX_LINK_STATUS 2 +#define V_CR4_RX_LINK_STATUS(x) ((x) << S_CR4_RX_LINK_STATUS) +#define F_CR4_RX_LINK_STATUS V_CR4_RX_LINK_STATUS(1U) + +#define A_MAC_PORT_MTIP_CR4_DEVICE_ID0 0x1b08 + +#define S_CR4_DEVICE_ID0 0 +#define M_CR4_DEVICE_ID0 0xffffU +#define V_CR4_DEVICE_ID0(x) ((x) << S_CR4_DEVICE_ID0) +#define G_CR4_DEVICE_ID0(x) (((x) >> S_CR4_DEVICE_ID0) & M_CR4_DEVICE_ID0) + +#define A_MAC_PORT_MTIP_CR4_DEVICE_ID1 0x1b0c + +#define S_CR4_DEVICE_ID1 0 +#define M_CR4_DEVICE_ID1 0xffffU +#define V_CR4_DEVICE_ID1(x) ((x) << S_CR4_DEVICE_ID1) +#define G_CR4_DEVICE_ID1(x) (((x) >> S_CR4_DEVICE_ID1) & M_CR4_DEVICE_ID1) + +#define A_MAC_PORT_MTIP_CR4_SPEED_ABILITY 0x1b10 + +#define S_CR4_100G_CAPABLE 8 +#define V_CR4_100G_CAPABLE(x) ((x) << S_CR4_100G_CAPABLE) +#define F_CR4_100G_CAPABLE V_CR4_100G_CAPABLE(1U) + +#define S_CR4_40G_CAPABLE 7 +#define V_CR4_40G_CAPABLE(x) ((x) << S_CR4_40G_CAPABLE) +#define F_CR4_40G_CAPABLE V_CR4_40G_CAPABLE(1U) + +#define A_MAC_PORT_MTIP_CR4_DEVICES_IN_PKG1 0x1b14 + +#define S_CLAUSE22REG_PRESENT 0 +#define V_CLAUSE22REG_PRESENT(x) ((x) << S_CLAUSE22REG_PRESENT) +#define F_CLAUSE22REG_PRESENT V_CLAUSE22REG_PRESENT(1U) + +#define A_MAC_PORT_MTIP_CR4_DEVICES_IN_PKG2 0x1b18 + +#define S_VSD_2_PRESENT 15 +#define V_VSD_2_PRESENT(x) ((x) << S_VSD_2_PRESENT) +#define F_VSD_2_PRESENT V_VSD_2_PRESENT(1U) + +#define S_VSD_1_PRESENT 14 +#define V_VSD_1_PRESENT(x) ((x) << S_VSD_1_PRESENT) +#define F_VSD_1_PRESENT V_VSD_1_PRESENT(1U) + +#define S_CLAUSE22_EXT_PRESENT 13 +#define 
V_CLAUSE22_EXT_PRESENT(x) ((x) << S_CLAUSE22_EXT_PRESENT) +#define F_CLAUSE22_EXT_PRESENT V_CLAUSE22_EXT_PRESENT(1U) + +#define A_MAC_PORT_MTIP_CR4_CONTROL_2 0x1b1c + +#define S_CR4_PCS_TYPE_SELECTION 0 +#define M_CR4_PCS_TYPE_SELECTION 0x7U +#define V_CR4_PCS_TYPE_SELECTION(x) ((x) << S_CR4_PCS_TYPE_SELECTION) +#define G_CR4_PCS_TYPE_SELECTION(x) (((x) >> S_CR4_PCS_TYPE_SELECTION) & M_CR4_PCS_TYPE_SELECTION) + +#define A_MAC_PORT_MTIP_CR4_STATUS_2 0x1b20 +#define A_MAC_PORT_MTIP_CR4_PKG_ID0 0x1b38 +#define A_MAC_PORT_MTIP_CR4_PKG_ID1 0x1b3c +#define A_MAC_PORT_MTIP_CR4_BASE_R_STATUS_1 0x1b80 + +#define S_RX_LINK_STAT 12 +#define V_RX_LINK_STAT(x) ((x) << S_RX_LINK_STAT) +#define F_RX_LINK_STAT V_RX_LINK_STAT(1U) + +#define S_BR_BLOCK_LOCK 0 +#define V_BR_BLOCK_LOCK(x) ((x) << S_BR_BLOCK_LOCK) +#define F_BR_BLOCK_LOCK V_BR_BLOCK_LOCK(1U) + +#define A_MAC_PORT_MTIP_CR4_BASE_R_STATUS_2 0x1b84 + +#define S_BER_COUNTER 8 +#define M_BER_COUNTER 0x3fU +#define V_BER_COUNTER(x) ((x) << S_BER_COUNTER) +#define G_BER_COUNTER(x) (((x) >> S_BER_COUNTER) & M_BER_COUNTER) + +#define S_ERRORED_BLOCKS_CNTR 0 +#define M_ERRORED_BLOCKS_CNTR 0xffU +#define V_ERRORED_BLOCKS_CNTR(x) ((x) << S_ERRORED_BLOCKS_CNTR) +#define G_ERRORED_BLOCKS_CNTR(x) (((x) >> S_ERRORED_BLOCKS_CNTR) & M_ERRORED_BLOCKS_CNTR) + +#define A_MAC_PORT_MTIP_CR4_BASE_R_TEST_CONTROL 0x1ba8 + +#define S_SCRAMBLED_ID_TP_EN 7 +#define V_SCRAMBLED_ID_TP_EN(x) ((x) << S_SCRAMBLED_ID_TP_EN) +#define F_SCRAMBLED_ID_TP_EN V_SCRAMBLED_ID_TP_EN(1U) + +#define A_MAC_PORT_MTIP_CR4_BASE_R_TEST_ERR_CNT 0x1bac + +#define S_BASE_R_TEST_ERR_CNT 0 +#define M_BASE_R_TEST_ERR_CNT 0xffffU +#define V_BASE_R_TEST_ERR_CNT(x) ((x) << S_BASE_R_TEST_ERR_CNT) +#define G_BASE_R_TEST_ERR_CNT(x) (((x) >> S_BASE_R_TEST_ERR_CNT) & M_BASE_R_TEST_ERR_CNT) + +#define A_MAC_PORT_MTIP_CR4_BER_HIGH_ORDER_CNT 0x1bb0 + +#define S_BER_HIGH_ORDER_CNT 0 +#define M_BER_HIGH_ORDER_CNT 0xffffU +#define V_BER_HIGH_ORDER_CNT(x) ((x) << S_BER_HIGH_ORDER_CNT) 
+#define G_BER_HIGH_ORDER_CNT(x) (((x) >> S_BER_HIGH_ORDER_CNT) & M_BER_HIGH_ORDER_CNT) + +#define A_MAC_PORT_MTIP_CR4_ERR_BLK_HIGH_ORDER_CNT 0x1bb4 + +#define S_HI_ORDER_CNT_PRESENT 15 +#define V_HI_ORDER_CNT_PRESENT(x) ((x) << S_HI_ORDER_CNT_PRESENT) +#define F_HI_ORDER_CNT_PRESENT V_HI_ORDER_CNT_PRESENT(1U) + +#define S_ERR_BLKS_CNTR 0 +#define M_ERR_BLKS_CNTR 0x3fffU +#define V_ERR_BLKS_CNTR(x) ((x) << S_ERR_BLKS_CNTR) +#define G_ERR_BLKS_CNTR(x) (((x) >> S_ERR_BLKS_CNTR) & M_ERR_BLKS_CNTR) + +#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_1 0x1bc8 + +#define S_LANE_ALIGN_STAT 12 +#define V_LANE_ALIGN_STAT(x) ((x) << S_LANE_ALIGN_STAT) +#define F_LANE_ALIGN_STAT V_LANE_ALIGN_STAT(1U) + +#define S_LANE_7_BLCK_LCK 7 +#define V_LANE_7_BLCK_LCK(x) ((x) << S_LANE_7_BLCK_LCK) +#define F_LANE_7_BLCK_LCK V_LANE_7_BLCK_LCK(1U) + +#define S_LANE_6_BLCK_LCK 6 +#define V_LANE_6_BLCK_LCK(x) ((x) << S_LANE_6_BLCK_LCK) +#define F_LANE_6_BLCK_LCK V_LANE_6_BLCK_LCK(1U) + +#define S_LANE_5_BLCK_LCK 5 +#define V_LANE_5_BLCK_LCK(x) ((x) << S_LANE_5_BLCK_LCK) +#define F_LANE_5_BLCK_LCK V_LANE_5_BLCK_LCK(1U) + +#define S_LANE_4_BLCK_LCK 4 +#define V_LANE_4_BLCK_LCK(x) ((x) << S_LANE_4_BLCK_LCK) +#define F_LANE_4_BLCK_LCK V_LANE_4_BLCK_LCK(1U) + +#define S_LANE_3_BLCK_LCK 3 +#define V_LANE_3_BLCK_LCK(x) ((x) << S_LANE_3_BLCK_LCK) +#define F_LANE_3_BLCK_LCK V_LANE_3_BLCK_LCK(1U) + +#define S_LANE_2_BLCK_LCK 2 +#define V_LANE_2_BLCK_LCK(x) ((x) << S_LANE_2_BLCK_LCK) +#define F_LANE_2_BLCK_LCK V_LANE_2_BLCK_LCK(1U) + +#define S_LANE_1_BLCK_LCK 1 +#define V_LANE_1_BLCK_LCK(x) ((x) << S_LANE_1_BLCK_LCK) +#define F_LANE_1_BLCK_LCK V_LANE_1_BLCK_LCK(1U) + +#define S_LANE_0_BLCK_LCK 0 +#define V_LANE_0_BLCK_LCK(x) ((x) << S_LANE_0_BLCK_LCK) +#define F_LANE_0_BLCK_LCK V_LANE_0_BLCK_LCK(1U) + +#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_2 0x1bcc + +#define S_LANE_19_BLCK_LCK 11 +#define V_LANE_19_BLCK_LCK(x) ((x) << S_LANE_19_BLCK_LCK) +#define F_LANE_19_BLCK_LCK 
V_LANE_19_BLCK_LCK(1U) + +#define S_LANE_18_BLCK_LCK 10 +#define V_LANE_18_BLCK_LCK(x) ((x) << S_LANE_18_BLCK_LCK) +#define F_LANE_18_BLCK_LCK V_LANE_18_BLCK_LCK(1U) + +#define S_LANE_17_BLCK_LCK 9 +#define V_LANE_17_BLCK_LCK(x) ((x) << S_LANE_17_BLCK_LCK) +#define F_LANE_17_BLCK_LCK V_LANE_17_BLCK_LCK(1U) + +#define S_LANE_16_BLCK_LCK 8 +#define V_LANE_16_BLCK_LCK(x) ((x) << S_LANE_16_BLCK_LCK) +#define F_LANE_16_BLCK_LCK V_LANE_16_BLCK_LCK(1U) + +#define S_LANE_15_BLCK_LCK 7 +#define V_LANE_15_BLCK_LCK(x) ((x) << S_LANE_15_BLCK_LCK) +#define F_LANE_15_BLCK_LCK V_LANE_15_BLCK_LCK(1U) + +#define S_LANE_14_BLCK_LCK 6 +#define V_LANE_14_BLCK_LCK(x) ((x) << S_LANE_14_BLCK_LCK) +#define F_LANE_14_BLCK_LCK V_LANE_14_BLCK_LCK(1U) + +#define S_LANE_13_BLCK_LCK 5 +#define V_LANE_13_BLCK_LCK(x) ((x) << S_LANE_13_BLCK_LCK) +#define F_LANE_13_BLCK_LCK V_LANE_13_BLCK_LCK(1U) + +#define S_LANE_12_BLCK_LCK 4 +#define V_LANE_12_BLCK_LCK(x) ((x) << S_LANE_12_BLCK_LCK) +#define F_LANE_12_BLCK_LCK V_LANE_12_BLCK_LCK(1U) + +#define S_LANE_11_BLCK_LCK 3 +#define V_LANE_11_BLCK_LCK(x) ((x) << S_LANE_11_BLCK_LCK) +#define F_LANE_11_BLCK_LCK V_LANE_11_BLCK_LCK(1U) + +#define S_LANE_10_BLCK_LCK 2 +#define V_LANE_10_BLCK_LCK(x) ((x) << S_LANE_10_BLCK_LCK) +#define F_LANE_10_BLCK_LCK V_LANE_10_BLCK_LCK(1U) + +#define S_LANE_9_BLCK_LCK 1 +#define V_LANE_9_BLCK_LCK(x) ((x) << S_LANE_9_BLCK_LCK) +#define F_LANE_9_BLCK_LCK V_LANE_9_BLCK_LCK(1U) + +#define S_LANE_8_BLCK_LCK 0 +#define V_LANE_8_BLCK_LCK(x) ((x) << S_LANE_8_BLCK_LCK) +#define F_LANE_8_BLCK_LCK V_LANE_8_BLCK_LCK(1U) + +#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_3 0x1bd0 + +#define S_LANE7_ALGN_MRKR_LCK 7 +#define V_LANE7_ALGN_MRKR_LCK(x) ((x) << S_LANE7_ALGN_MRKR_LCK) +#define F_LANE7_ALGN_MRKR_LCK V_LANE7_ALGN_MRKR_LCK(1U) + +#define S_LANE6_ALGN_MRKR_LCK 6 +#define V_LANE6_ALGN_MRKR_LCK(x) ((x) << S_LANE6_ALGN_MRKR_LCK) +#define F_LANE6_ALGN_MRKR_LCK V_LANE6_ALGN_MRKR_LCK(1U) + +#define S_LANE5_ALGN_MRKR_LCK 5 +#define 
V_LANE5_ALGN_MRKR_LCK(x) ((x) << S_LANE5_ALGN_MRKR_LCK) +#define F_LANE5_ALGN_MRKR_LCK V_LANE5_ALGN_MRKR_LCK(1U) + +#define S_LANE4_ALGN_MRKR_LCK 4 +#define V_LANE4_ALGN_MRKR_LCK(x) ((x) << S_LANE4_ALGN_MRKR_LCK) +#define F_LANE4_ALGN_MRKR_LCK V_LANE4_ALGN_MRKR_LCK(1U) + +#define S_LANE3_ALGN_MRKR_LCK 3 +#define V_LANE3_ALGN_MRKR_LCK(x) ((x) << S_LANE3_ALGN_MRKR_LCK) +#define F_LANE3_ALGN_MRKR_LCK V_LANE3_ALGN_MRKR_LCK(1U) + +#define S_LANE2_ALGN_MRKR_LCK 2 +#define V_LANE2_ALGN_MRKR_LCK(x) ((x) << S_LANE2_ALGN_MRKR_LCK) +#define F_LANE2_ALGN_MRKR_LCK V_LANE2_ALGN_MRKR_LCK(1U) + +#define S_LANE1_ALGN_MRKR_LCK 1 +#define V_LANE1_ALGN_MRKR_LCK(x) ((x) << S_LANE1_ALGN_MRKR_LCK) +#define F_LANE1_ALGN_MRKR_LCK V_LANE1_ALGN_MRKR_LCK(1U) + +#define S_LANE0_ALGN_MRKR_LCK 0 +#define V_LANE0_ALGN_MRKR_LCK(x) ((x) << S_LANE0_ALGN_MRKR_LCK) +#define F_LANE0_ALGN_MRKR_LCK V_LANE0_ALGN_MRKR_LCK(1U) + +#define A_MAC_PORT_MTIP_CR4_MULTI_LANE_ALIGN_STATUS_4 0x1bd4 + +#define S_LANE19_ALGN_MRKR_LCK 11 +#define V_LANE19_ALGN_MRKR_LCK(x) ((x) << S_LANE19_ALGN_MRKR_LCK) +#define F_LANE19_ALGN_MRKR_LCK V_LANE19_ALGN_MRKR_LCK(1U) + +#define S_LANE18_ALGN_MRKR_LCK 10 +#define V_LANE18_ALGN_MRKR_LCK(x) ((x) << S_LANE18_ALGN_MRKR_LCK) +#define F_LANE18_ALGN_MRKR_LCK V_LANE18_ALGN_MRKR_LCK(1U) + +#define S_LANE17_ALGN_MRKR_LCK 9 +#define V_LANE17_ALGN_MRKR_LCK(x) ((x) << S_LANE17_ALGN_MRKR_LCK) +#define F_LANE17_ALGN_MRKR_LCK V_LANE17_ALGN_MRKR_LCK(1U) + +#define S_LANE16_ALGN_MRKR_LCK 8 +#define V_LANE16_ALGN_MRKR_LCK(x) ((x) << S_LANE16_ALGN_MRKR_LCK) +#define F_LANE16_ALGN_MRKR_LCK V_LANE16_ALGN_MRKR_LCK(1U) + +#define S_LANE15_ALGN_MRKR_LCK 7 +#define V_LANE15_ALGN_MRKR_LCK(x) ((x) << S_LANE15_ALGN_MRKR_LCK) +#define F_LANE15_ALGN_MRKR_LCK V_LANE15_ALGN_MRKR_LCK(1U) + +#define S_LANE14_ALGN_MRKR_LCK 6 +#define V_LANE14_ALGN_MRKR_LCK(x) ((x) << S_LANE14_ALGN_MRKR_LCK) +#define F_LANE14_ALGN_MRKR_LCK V_LANE14_ALGN_MRKR_LCK(1U) + +#define S_LANE13_ALGN_MRKR_LCK 5 +#define 
V_LANE13_ALGN_MRKR_LCK(x) ((x) << S_LANE13_ALGN_MRKR_LCK) +#define F_LANE13_ALGN_MRKR_LCK V_LANE13_ALGN_MRKR_LCK(1U) + +#define S_LANE12_ALGN_MRKR_LCK 4 +#define V_LANE12_ALGN_MRKR_LCK(x) ((x) << S_LANE12_ALGN_MRKR_LCK) +#define F_LANE12_ALGN_MRKR_LCK V_LANE12_ALGN_MRKR_LCK(1U) + +#define S_LANE11_ALGN_MRKR_LCK 3 +#define V_LANE11_ALGN_MRKR_LCK(x) ((x) << S_LANE11_ALGN_MRKR_LCK) +#define F_LANE11_ALGN_MRKR_LCK V_LANE11_ALGN_MRKR_LCK(1U) + +#define S_LANE10_ALGN_MRKR_LCK 2 +#define V_LANE10_ALGN_MRKR_LCK(x) ((x) << S_LANE10_ALGN_MRKR_LCK) +#define F_LANE10_ALGN_MRKR_LCK V_LANE10_ALGN_MRKR_LCK(1U) + +#define S_LANE9_ALGN_MRKR_LCK 1 +#define V_LANE9_ALGN_MRKR_LCK(x) ((x) << S_LANE9_ALGN_MRKR_LCK) +#define F_LANE9_ALGN_MRKR_LCK V_LANE9_ALGN_MRKR_LCK(1U) + +#define S_LANE8_ALGN_MRKR_LCK 0 +#define V_LANE8_ALGN_MRKR_LCK(x) ((x) << S_LANE8_ALGN_MRKR_LCK) +#define F_LANE8_ALGN_MRKR_LCK V_LANE8_ALGN_MRKR_LCK(1U) + #define A_MAC_PORT_MTIP_PCS_CTL 0x1e00 #define S_PCS_LPBK 14 @@ -35380,6 +49502,48 @@ #define V_10GBASE_R(x) ((x) << S_10GBASE_R) #define F_10GBASE_R V_10GBASE_R(1U) +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_0 0x1e20 + +#define S_BIP_ERR_CNTLANE_0 0 +#define M_BIP_ERR_CNTLANE_0 0xffffU +#define V_BIP_ERR_CNTLANE_0(x) ((x) << S_BIP_ERR_CNTLANE_0) +#define G_BIP_ERR_CNTLANE_0(x) (((x) >> S_BIP_ERR_CNTLANE_0) & M_BIP_ERR_CNTLANE_0) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_1 0x1e24 + +#define S_BIP_ERR_CNTLANE_1 0 +#define M_BIP_ERR_CNTLANE_1 0xffffU +#define V_BIP_ERR_CNTLANE_1(x) ((x) << S_BIP_ERR_CNTLANE_1) +#define G_BIP_ERR_CNTLANE_1(x) (((x) >> S_BIP_ERR_CNTLANE_1) & M_BIP_ERR_CNTLANE_1) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_2 0x1e28 + +#define S_BIP_ERR_CNTLANE_2 0 +#define M_BIP_ERR_CNTLANE_2 0xffffU +#define V_BIP_ERR_CNTLANE_2(x) ((x) << S_BIP_ERR_CNTLANE_2) +#define G_BIP_ERR_CNTLANE_2(x) (((x) >> S_BIP_ERR_CNTLANE_2) & M_BIP_ERR_CNTLANE_2) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_3 0x1e2c + +#define S_BIP_ERR_CNTLANE_3 0 +#define 
M_BIP_ERR_CNTLANE_3 0xffffU +#define V_BIP_ERR_CNTLANE_3(x) ((x) << S_BIP_ERR_CNTLANE_3) +#define G_BIP_ERR_CNTLANE_3(x) (((x) >> S_BIP_ERR_CNTLANE_3) & M_BIP_ERR_CNTLANE_3) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_4 0x1e30 + +#define S_BIP_ERR_CNTLANE_4 0 +#define M_BIP_ERR_CNTLANE_4 0xffffU +#define V_BIP_ERR_CNTLANE_4(x) ((x) << S_BIP_ERR_CNTLANE_4) +#define G_BIP_ERR_CNTLANE_4(x) (((x) >> S_BIP_ERR_CNTLANE_4) & M_BIP_ERR_CNTLANE_4) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_5 0x1e34 + +#define S_BIP_ERR_CNTLANE_5 0 +#define M_BIP_ERR_CNTLANE_5 0xffffU +#define V_BIP_ERR_CNTLANE_5(x) ((x) << S_BIP_ERR_CNTLANE_5) +#define G_BIP_ERR_CNTLANE_5(x) (((x) >> S_BIP_ERR_CNTLANE_5) & M_BIP_ERR_CNTLANE_5) + #define A_MAC_PORT_MTIP_PCS_PKG_ID0 0x1e38 #define S_PKG_ID0 0 @@ -35387,6 +49551,13 @@ #define V_PKG_ID0(x) ((x) << S_PKG_ID0) #define G_PKG_ID0(x) (((x) >> S_PKG_ID0) & M_PKG_ID0) +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_6 0x1e38 + +#define S_BIP_ERR_CNTLANE_6 0 +#define M_BIP_ERR_CNTLANE_6 0xffffU +#define V_BIP_ERR_CNTLANE_6(x) ((x) << S_BIP_ERR_CNTLANE_6) +#define G_BIP_ERR_CNTLANE_6(x) (((x) >> S_BIP_ERR_CNTLANE_6) & M_BIP_ERR_CNTLANE_6) + #define A_MAC_PORT_MTIP_PCS_PKG_ID1 0x1e3c #define S_PKG_ID1 0 @@ -35394,6 +49565,97 @@ #define V_PKG_ID1(x) ((x) << S_PKG_ID1) #define G_PKG_ID1(x) (((x) >> S_PKG_ID1) & M_PKG_ID1) +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_7 0x1e3c + +#define S_BIP_ERR_CNTLANE_7 0 +#define M_BIP_ERR_CNTLANE_7 0xffffU +#define V_BIP_ERR_CNTLANE_7(x) ((x) << S_BIP_ERR_CNTLANE_7) +#define G_BIP_ERR_CNTLANE_7(x) (((x) >> S_BIP_ERR_CNTLANE_7) & M_BIP_ERR_CNTLANE_7) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_8 0x1e40 + +#define S_BIP_ERR_CNTLANE_8 0 +#define M_BIP_ERR_CNTLANE_8 0xffffU +#define V_BIP_ERR_CNTLANE_8(x) ((x) << S_BIP_ERR_CNTLANE_8) +#define G_BIP_ERR_CNTLANE_8(x) (((x) >> S_BIP_ERR_CNTLANE_8) & M_BIP_ERR_CNTLANE_8) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_9 0x1e44 + +#define S_BIP_ERR_CNTLANE_9 0 +#define 
M_BIP_ERR_CNTLANE_9 0xffffU +#define V_BIP_ERR_CNTLANE_9(x) ((x) << S_BIP_ERR_CNTLANE_9) +#define G_BIP_ERR_CNTLANE_9(x) (((x) >> S_BIP_ERR_CNTLANE_9) & M_BIP_ERR_CNTLANE_9) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_10 0x1e48 + +#define S_BIP_ERR_CNTLANE_10 0 +#define M_BIP_ERR_CNTLANE_10 0xffffU +#define V_BIP_ERR_CNTLANE_10(x) ((x) << S_BIP_ERR_CNTLANE_10) +#define G_BIP_ERR_CNTLANE_10(x) (((x) >> S_BIP_ERR_CNTLANE_10) & M_BIP_ERR_CNTLANE_10) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_11 0x1e4c + +#define S_BIP_ERR_CNTLANE_11 0 +#define M_BIP_ERR_CNTLANE_11 0xffffU +#define V_BIP_ERR_CNTLANE_11(x) ((x) << S_BIP_ERR_CNTLANE_11) +#define G_BIP_ERR_CNTLANE_11(x) (((x) >> S_BIP_ERR_CNTLANE_11) & M_BIP_ERR_CNTLANE_11) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_12 0x1e50 + +#define S_BIP_ERR_CNTLANE_12 0 +#define M_BIP_ERR_CNTLANE_12 0xffffU +#define V_BIP_ERR_CNTLANE_12(x) ((x) << S_BIP_ERR_CNTLANE_12) +#define G_BIP_ERR_CNTLANE_12(x) (((x) >> S_BIP_ERR_CNTLANE_12) & M_BIP_ERR_CNTLANE_12) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_13 0x1e54 + +#define S_BIP_ERR_CNTLANE_13 0 +#define M_BIP_ERR_CNTLANE_13 0xffffU +#define V_BIP_ERR_CNTLANE_13(x) ((x) << S_BIP_ERR_CNTLANE_13) +#define G_BIP_ERR_CNTLANE_13(x) (((x) >> S_BIP_ERR_CNTLANE_13) & M_BIP_ERR_CNTLANE_13) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_14 0x1e58 + +#define S_BIP_ERR_CNTLANE_14 0 +#define M_BIP_ERR_CNTLANE_14 0xffffU +#define V_BIP_ERR_CNTLANE_14(x) ((x) << S_BIP_ERR_CNTLANE_14) +#define G_BIP_ERR_CNTLANE_14(x) (((x) >> S_BIP_ERR_CNTLANE_14) & M_BIP_ERR_CNTLANE_14) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_15 0x1e5c + +#define S_BIP_ERR_CNTLANE_15 0 +#define M_BIP_ERR_CNTLANE_15 0xffffU +#define V_BIP_ERR_CNTLANE_15(x) ((x) << S_BIP_ERR_CNTLANE_15) +#define G_BIP_ERR_CNTLANE_15(x) (((x) >> S_BIP_ERR_CNTLANE_15) & M_BIP_ERR_CNTLANE_15) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_16 0x1e60 + +#define S_BIP_ERR_CNTLANE_16 0 +#define M_BIP_ERR_CNTLANE_16 0xffffU +#define 
V_BIP_ERR_CNTLANE_16(x) ((x) << S_BIP_ERR_CNTLANE_16) +#define G_BIP_ERR_CNTLANE_16(x) (((x) >> S_BIP_ERR_CNTLANE_16) & M_BIP_ERR_CNTLANE_16) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_17 0x1e64 + +#define S_BIP_ERR_CNTLANE_17 0 +#define M_BIP_ERR_CNTLANE_17 0xffffU +#define V_BIP_ERR_CNTLANE_17(x) ((x) << S_BIP_ERR_CNTLANE_17) +#define G_BIP_ERR_CNTLANE_17(x) (((x) >> S_BIP_ERR_CNTLANE_17) & M_BIP_ERR_CNTLANE_17) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_18 0x1e68 + +#define S_BIP_ERR_CNTLANE_18 0 +#define M_BIP_ERR_CNTLANE_18 0xffffU +#define V_BIP_ERR_CNTLANE_18(x) ((x) << S_BIP_ERR_CNTLANE_18) +#define G_BIP_ERR_CNTLANE_18(x) (((x) >> S_BIP_ERR_CNTLANE_18) & M_BIP_ERR_CNTLANE_18) + +#define A_MAC_PORT_MTIP_CR4_BIP_ERR_CNTLANE_19 0x1e6c + +#define S_BIP_ERR_CNTLANE_19 0 +#define M_BIP_ERR_CNTLANE_19 0xffffU +#define V_BIP_ERR_CNTLANE_19(x) ((x) << S_BIP_ERR_CNTLANE_19) +#define G_BIP_ERR_CNTLANE_19(x) (((x) >> S_BIP_ERR_CNTLANE_19) & M_BIP_ERR_CNTLANE_19) + #define A_MAC_PORT_MTIP_PCS_BASER_STATUS1 0x1e80 #define S_RXLINKSTATUS 12 @@ -35776,6 +50038,154 @@ #define A_MAC_PORT_MTIP_PCS_LANE_MAP_17 0x1ffc #define A_MAC_PORT_MTIP_PCS_LANE_MAP_18 0x2000 #define A_MAC_PORT_MTIP_PCS_LANE_MAP_19 0x2004 +#define A_MAC_PORT_MTIP_CR4_LANE_0_MAPPING 0x2140 + +#define S_LANE_0_MAPPING 0 +#define M_LANE_0_MAPPING 0x3fU +#define V_LANE_0_MAPPING(x) ((x) << S_LANE_0_MAPPING) +#define G_LANE_0_MAPPING(x) (((x) >> S_LANE_0_MAPPING) & M_LANE_0_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_1_MAPPING 0x2144 + +#define S_LANE_1_MAPPING 0 +#define M_LANE_1_MAPPING 0x3fU +#define V_LANE_1_MAPPING(x) ((x) << S_LANE_1_MAPPING) +#define G_LANE_1_MAPPING(x) (((x) >> S_LANE_1_MAPPING) & M_LANE_1_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_2_MAPPING 0x2148 + +#define S_LANE_2_MAPPING 0 +#define M_LANE_2_MAPPING 0x3fU +#define V_LANE_2_MAPPING(x) ((x) << S_LANE_2_MAPPING) +#define G_LANE_2_MAPPING(x) (((x) >> S_LANE_2_MAPPING) & M_LANE_2_MAPPING) + +#define 
A_MAC_PORT_MTIP_CR4_LANE_3_MAPPING 0x214c + +#define S_LANE_3_MAPPING 0 +#define M_LANE_3_MAPPING 0x3fU +#define V_LANE_3_MAPPING(x) ((x) << S_LANE_3_MAPPING) +#define G_LANE_3_MAPPING(x) (((x) >> S_LANE_3_MAPPING) & M_LANE_3_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_4_MAPPING 0x2150 + +#define S_LANE_4_MAPPING 0 +#define M_LANE_4_MAPPING 0x3fU +#define V_LANE_4_MAPPING(x) ((x) << S_LANE_4_MAPPING) +#define G_LANE_4_MAPPING(x) (((x) >> S_LANE_4_MAPPING) & M_LANE_4_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_5_MAPPING 0x2154 + +#define S_LANE_5_MAPPING 0 +#define M_LANE_5_MAPPING 0x3fU +#define V_LANE_5_MAPPING(x) ((x) << S_LANE_5_MAPPING) +#define G_LANE_5_MAPPING(x) (((x) >> S_LANE_5_MAPPING) & M_LANE_5_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_6_MAPPING 0x2158 + +#define S_LANE_6_MAPPING 0 +#define M_LANE_6_MAPPING 0x3fU +#define V_LANE_6_MAPPING(x) ((x) << S_LANE_6_MAPPING) +#define G_LANE_6_MAPPING(x) (((x) >> S_LANE_6_MAPPING) & M_LANE_6_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_7_MAPPING 0x215c + +#define S_LANE_7_MAPPING 0 +#define M_LANE_7_MAPPING 0x3fU +#define V_LANE_7_MAPPING(x) ((x) << S_LANE_7_MAPPING) +#define G_LANE_7_MAPPING(x) (((x) >> S_LANE_7_MAPPING) & M_LANE_7_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_8_MAPPING 0x2160 + +#define S_LANE_8_MAPPING 0 +#define M_LANE_8_MAPPING 0x3fU +#define V_LANE_8_MAPPING(x) ((x) << S_LANE_8_MAPPING) +#define G_LANE_8_MAPPING(x) (((x) >> S_LANE_8_MAPPING) & M_LANE_8_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_9_MAPPING 0x2164 + +#define S_LANE_9_MAPPING 0 +#define M_LANE_9_MAPPING 0x3fU +#define V_LANE_9_MAPPING(x) ((x) << S_LANE_9_MAPPING) +#define G_LANE_9_MAPPING(x) (((x) >> S_LANE_9_MAPPING) & M_LANE_9_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_10_MAPPING 0x2168 + +#define S_LANE_10_MAPPING 0 +#define M_LANE_10_MAPPING 0x3fU +#define V_LANE_10_MAPPING(x) ((x) << S_LANE_10_MAPPING) +#define G_LANE_10_MAPPING(x) (((x) >> S_LANE_10_MAPPING) & M_LANE_10_MAPPING) + +#define 
A_MAC_PORT_MTIP_CR4_LANE_11_MAPPING 0x216c + +#define S_LANE_11_MAPPING 0 +#define M_LANE_11_MAPPING 0x3fU +#define V_LANE_11_MAPPING(x) ((x) << S_LANE_11_MAPPING) +#define G_LANE_11_MAPPING(x) (((x) >> S_LANE_11_MAPPING) & M_LANE_11_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_12_MAPPING 0x2170 + +#define S_LANE_12_MAPPING 0 +#define M_LANE_12_MAPPING 0x3fU +#define V_LANE_12_MAPPING(x) ((x) << S_LANE_12_MAPPING) +#define G_LANE_12_MAPPING(x) (((x) >> S_LANE_12_MAPPING) & M_LANE_12_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_13_MAPPING 0x2174 + +#define S_LANE_13_MAPPING 0 +#define M_LANE_13_MAPPING 0x3fU +#define V_LANE_13_MAPPING(x) ((x) << S_LANE_13_MAPPING) +#define G_LANE_13_MAPPING(x) (((x) >> S_LANE_13_MAPPING) & M_LANE_13_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_14_MAPPING 0x2178 + +#define S_LANE_14_MAPPING 0 +#define M_LANE_14_MAPPING 0x3fU +#define V_LANE_14_MAPPING(x) ((x) << S_LANE_14_MAPPING) +#define G_LANE_14_MAPPING(x) (((x) >> S_LANE_14_MAPPING) & M_LANE_14_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_15_MAPPING 0x217c + +#define S_LANE_15_MAPPING 0 +#define M_LANE_15_MAPPING 0x3fU +#define V_LANE_15_MAPPING(x) ((x) << S_LANE_15_MAPPING) +#define G_LANE_15_MAPPING(x) (((x) >> S_LANE_15_MAPPING) & M_LANE_15_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_16_MAPPING 0x2180 + +#define S_LANE_16_MAPPING 0 +#define M_LANE_16_MAPPING 0x3fU +#define V_LANE_16_MAPPING(x) ((x) << S_LANE_16_MAPPING) +#define G_LANE_16_MAPPING(x) (((x) >> S_LANE_16_MAPPING) & M_LANE_16_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_17_MAPPING 0x2184 + +#define S_LANE_17_MAPPING 0 +#define M_LANE_17_MAPPING 0x3fU +#define V_LANE_17_MAPPING(x) ((x) << S_LANE_17_MAPPING) +#define G_LANE_17_MAPPING(x) (((x) >> S_LANE_17_MAPPING) & M_LANE_17_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_18_MAPPING 0x2188 + +#define S_LANE_18_MAPPING 0 +#define M_LANE_18_MAPPING 0x3fU +#define V_LANE_18_MAPPING(x) ((x) << S_LANE_18_MAPPING) +#define G_LANE_18_MAPPING(x) (((x) >> S_LANE_18_MAPPING) 
& M_LANE_18_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_LANE_19_MAPPING 0x218c + +#define S_LANE_19_MAPPING 0 +#define M_LANE_19_MAPPING 0x3fU +#define V_LANE_19_MAPPING(x) ((x) << S_LANE_19_MAPPING) +#define G_LANE_19_MAPPING(x) (((x) >> S_LANE_19_MAPPING) & M_LANE_19_MAPPING) + +#define A_MAC_PORT_MTIP_CR4_SCRATCH 0x21f0 +#define A_MAC_PORT_MTIP_CR4_CORE_REVISION 0x21f4 + +#define S_CORE_REVISION 0 +#define M_CORE_REVISION 0xffffU +#define V_CORE_REVISION(x) ((x) << S_CORE_REVISION) +#define G_CORE_REVISION(x) (((x) >> S_CORE_REVISION) & M_CORE_REVISION) + #define A_MAC_PORT_BEAN_CTL 0x2200 #define S_AN_RESET 15 @@ -35794,6 +50204,16 @@ #define V_RESTART_BEAN(x) ((x) << S_RESTART_BEAN) #define F_RESTART_BEAN V_RESTART_BEAN(1U) +#define A_MAC_PORT_MTIP_RS_FEC_CONTROL 0x2200 + +#define S_RS_FEC_BYPASS_ERROR_INDICATION 1 +#define V_RS_FEC_BYPASS_ERROR_INDICATION(x) ((x) << S_RS_FEC_BYPASS_ERROR_INDICATION) +#define F_RS_FEC_BYPASS_ERROR_INDICATION V_RS_FEC_BYPASS_ERROR_INDICATION(1U) + +#define S_RS_FEC_BYPASS_CORRECTION 0 +#define V_RS_FEC_BYPASS_CORRECTION(x) ((x) << S_RS_FEC_BYPASS_CORRECTION) +#define F_RS_FEC_BYPASS_CORRECTION V_RS_FEC_BYPASS_CORRECTION(1U) + #define A_MAC_PORT_BEAN_STATUS 0x2204 #define S_PDF 9 @@ -35824,6 +50244,28 @@ #define V_LP_BEAN_ABILITY(x) ((x) << S_LP_BEAN_ABILITY) #define F_LP_BEAN_ABILITY V_LP_BEAN_ABILITY(1U) +#define A_MAC_PORT_MTIP_RS_FEC_STATUS 0x2204 + +#define S_RS_FEC_PCS_ALIGN_STATUS 15 +#define V_RS_FEC_PCS_ALIGN_STATUS(x) ((x) << S_RS_FEC_PCS_ALIGN_STATUS) +#define F_RS_FEC_PCS_ALIGN_STATUS V_RS_FEC_PCS_ALIGN_STATUS(1U) + +#define S_FEC_ALIGN_STATUS 14 +#define V_FEC_ALIGN_STATUS(x) ((x) << S_FEC_ALIGN_STATUS) +#define F_FEC_ALIGN_STATUS V_FEC_ALIGN_STATUS(1U) + +#define S_RS_FEC_HIGH_SER 2 +#define V_RS_FEC_HIGH_SER(x) ((x) << S_RS_FEC_HIGH_SER) +#define F_RS_FEC_HIGH_SER V_RS_FEC_HIGH_SER(1U) + +#define S_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY 1 +#define V_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY(x) ((x) << 
S_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY) +#define F_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY V_RS_FEC_BYPASS_ERROR_INDICATION_ABILITY(1U) + +#define S_RS_FEC_BYPASS_CORRECTION_ABILITY 0 +#define V_RS_FEC_BYPASS_CORRECTION_ABILITY(x) ((x) << S_RS_FEC_BYPASS_CORRECTION_ABILITY) +#define F_RS_FEC_BYPASS_CORRECTION_ABILITY V_RS_FEC_BYPASS_CORRECTION_ABILITY(1U) + #define A_MAC_PORT_BEAN_ABILITY_0 0x2208 #define S_NXP 15 @@ -35849,6 +50291,13 @@ #define V_SELECTOR(x) ((x) << S_SELECTOR) #define G_SELECTOR(x) (((x) >> S_SELECTOR) & M_SELECTOR) +#define A_MAC_PORT_MTIP_RS_FEC_CCW_LO 0x2208 + +#define S_RS_RS_FEC_CCW_LO 0 +#define M_RS_RS_FEC_CCW_LO 0xffffU +#define V_RS_RS_FEC_CCW_LO(x) ((x) << S_RS_RS_FEC_CCW_LO) +#define G_RS_RS_FEC_CCW_LO(x) (((x) >> S_RS_RS_FEC_CCW_LO) & M_RS_RS_FEC_CCW_LO) + #define A_MAC_PORT_BEAN_ABILITY_1 0x220c #define S_TECH_ABILITY_1 5 @@ -35861,6 +50310,13 @@ #define V_TX_NONCE(x) ((x) << S_TX_NONCE) #define G_TX_NONCE(x) (((x) >> S_TX_NONCE) & M_TX_NONCE) +#define A_MAC_PORT_MTIP_RS_FEC_CCW_HI 0x220c + +#define S_RS_RS_FEC_CCW_HI 0 +#define M_RS_RS_FEC_CCW_HI 0xffffU +#define V_RS_RS_FEC_CCW_HI(x) ((x) << S_RS_RS_FEC_CCW_HI) +#define G_RS_RS_FEC_CCW_HI(x) (((x) >> S_RS_RS_FEC_CCW_HI) & M_RS_RS_FEC_CCW_HI) + #define A_MAC_PORT_BEAN_ABILITY_2 0x2210 #define S_T5_FEC_ABILITY 14 @@ -35873,8 +50329,29 @@ #define V_TECH_ABILITY_2(x) ((x) << S_TECH_ABILITY_2) #define G_TECH_ABILITY_2(x) (((x) >> S_TECH_ABILITY_2) & M_TECH_ABILITY_2) +#define A_MAC_PORT_MTIP_RS_FEC_NCCW_LO 0x2210 + +#define S_RS_RS_FEC_NCCW_LO 0 +#define M_RS_RS_FEC_NCCW_LO 0xffffU +#define V_RS_RS_FEC_NCCW_LO(x) ((x) << S_RS_RS_FEC_NCCW_LO) +#define G_RS_RS_FEC_NCCW_LO(x) (((x) >> S_RS_RS_FEC_NCCW_LO) & M_RS_RS_FEC_NCCW_LO) + #define A_MAC_PORT_BEAN_REM_ABILITY_0 0x2214 +#define A_MAC_PORT_MTIP_RS_FEC_NCCW_HI 0x2214 + +#define S_RS_RS_FEC_NCCW_HI 0 +#define M_RS_RS_FEC_NCCW_HI 0xffffU +#define V_RS_RS_FEC_NCCW_HI(x) ((x) << S_RS_RS_FEC_NCCW_HI) +#define G_RS_RS_FEC_NCCW_HI(x) (((x) 
>> S_RS_RS_FEC_NCCW_HI) & M_RS_RS_FEC_NCCW_HI) + #define A_MAC_PORT_BEAN_REM_ABILITY_1 0x2218 +#define A_MAC_PORT_MTIP_RS_FEC_LANEMAPRS_FEC_NCCW_HI 0x2218 + +#define S_PMA_MAPPING 0 +#define M_PMA_MAPPING 0xffU +#define V_PMA_MAPPING(x) ((x) << S_PMA_MAPPING) +#define G_PMA_MAPPING(x) (((x) >> S_PMA_MAPPING) & M_PMA_MAPPING) + #define A_MAC_PORT_BEAN_REM_ABILITY_2 0x221c #define A_MAC_PORT_BEAN_MS_COUNT 0x2220 @@ -35913,10 +50390,40 @@ #define V_UNFORMATED(x) ((x) << S_UNFORMATED) #define G_UNFORMATED(x) (((x) >> S_UNFORMATED) & M_UNFORMATED) +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR0_LO 0x2228 + +#define S_RS_FEC_SYMBLERR0_LO 0 +#define V_RS_FEC_SYMBLERR0_LO(x) ((x) << S_RS_FEC_SYMBLERR0_LO) +#define F_RS_FEC_SYMBLERR0_LO V_RS_FEC_SYMBLERR0_LO(1U) + #define A_MAC_PORT_BEAN_XNP_2 0x222c +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR0_HI 0x222c + +#define S_RS_FEC_SYMBLERR0_HI 0 +#define V_RS_FEC_SYMBLERR0_HI(x) ((x) << S_RS_FEC_SYMBLERR0_HI) +#define F_RS_FEC_SYMBLERR0_HI V_RS_FEC_SYMBLERR0_HI(1U) + #define A_MAC_PORT_LP_BEAN_XNP_0 0x2230 +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR1_LO 0x2230 + +#define S_RS_FEC_SYMBLERR1_LO 0 +#define V_RS_FEC_SYMBLERR1_LO(x) ((x) << S_RS_FEC_SYMBLERR1_LO) +#define F_RS_FEC_SYMBLERR1_LO V_RS_FEC_SYMBLERR1_LO(1U) + #define A_MAC_PORT_LP_BEAN_XNP_1 0x2234 +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR1_HI 0x2234 + +#define S_RS_FEC_SYMBLERR1_HI 0 +#define V_RS_FEC_SYMBLERR1_HI(x) ((x) << S_RS_FEC_SYMBLERR1_HI) +#define F_RS_FEC_SYMBLERR1_HI V_RS_FEC_SYMBLERR1_HI(1U) + #define A_MAC_PORT_LP_BEAN_XNP_2 0x2238 +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR2_LO 0x2238 + +#define S_RS_FEC_SYMBLERR2_LO 0 +#define V_RS_FEC_SYMBLERR2_LO(x) ((x) << S_RS_FEC_SYMBLERR2_LO) +#define F_RS_FEC_SYMBLERR2_LO V_RS_FEC_SYMBLERR2_LO(1U) + #define A_MAC_PORT_BEAN_ETH_STATUS 0x223c #define S_100GCR10 8 @@ -35947,8 +50454,26 @@ #define V_1GKX(x) ((x) << S_1GKX) #define F_1GKX V_1GKX(1U) +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR2_HI 0x223c + +#define S_RS_FEC_SYMBLERR2_HI 0 
+#define V_RS_FEC_SYMBLERR2_HI(x) ((x) << S_RS_FEC_SYMBLERR2_HI) +#define F_RS_FEC_SYMBLERR2_HI V_RS_FEC_SYMBLERR2_HI(1U) + #define A_MAC_PORT_BEAN_CTL_LANE1 0x2240 +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR3_LO 0x2240 + +#define S_RS_FEC_SYMBLERR3_LO 0 +#define V_RS_FEC_SYMBLERR3_LO(x) ((x) << S_RS_FEC_SYMBLERR3_LO) +#define F_RS_FEC_SYMBLERR3_LO V_RS_FEC_SYMBLERR3_LO(1U) + #define A_MAC_PORT_BEAN_STATUS_LANE1 0x2244 +#define A_MAC_PORT_MTIP_RS_FEC_SYMBLERR3_HI 0x2244 + +#define S_RS_FEC_SYMBLERR3_HI 0 +#define V_RS_FEC_SYMBLERR3_HI(x) ((x) << S_RS_FEC_SYMBLERR3_HI) +#define F_RS_FEC_SYMBLERR3_HI V_RS_FEC_SYMBLERR3_HI(1U) + #define A_MAC_PORT_BEAN_ABILITY_0_LANE1 0x2248 #define A_MAC_PORT_BEAN_ABILITY_1_LANE1 0x224c #define A_MAC_PORT_BEAN_ABILITY_2_LANE1 0x2250 @@ -35995,6 +50520,92 @@ #define A_MAC_PORT_LP_BEAN_XNP_1_LANE3 0x22f4 #define A_MAC_PORT_LP_BEAN_XNP_2_LANE3 0x22f8 #define A_MAC_PORT_BEAN_ETH_STATUS_LANE3 0x22fc +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_CONTROL 0x2400 + +#define S_RS_FEC_ENABLED_STATUS 15 +#define V_RS_FEC_ENABLED_STATUS(x) ((x) << S_RS_FEC_ENABLED_STATUS) +#define F_RS_FEC_ENABLED_STATUS V_RS_FEC_ENABLED_STATUS(1U) + +#define S_RS_FEC_ENABLE 2 +#define V_RS_FEC_ENABLE(x) ((x) << S_RS_FEC_ENABLE) +#define F_RS_FEC_ENABLE V_RS_FEC_ENABLE(1U) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_INFO_1 0x2404 + +#define S_DESKEW_EMPTY 12 +#define M_DESKEW_EMPTY 0xfU +#define V_DESKEW_EMPTY(x) ((x) << S_DESKEW_EMPTY) +#define G_DESKEW_EMPTY(x) (((x) >> S_DESKEW_EMPTY) & M_DESKEW_EMPTY) + +#define S_FEC_ALIGN_STATUS_LH 10 +#define V_FEC_ALIGN_STATUS_LH(x) ((x) << S_FEC_ALIGN_STATUS_LH) +#define F_FEC_ALIGN_STATUS_LH V_FEC_ALIGN_STATUS_LH(1U) + +#define S_TX_DP_OVERFLOW 9 +#define V_TX_DP_OVERFLOW(x) ((x) << S_TX_DP_OVERFLOW) +#define F_TX_DP_OVERFLOW V_TX_DP_OVERFLOW(1U) + +#define S_RX_DP_OVERFLOW 8 +#define V_RX_DP_OVERFLOW(x) ((x) << S_RX_DP_OVERFLOW) +#define F_RX_DP_OVERFLOW V_RX_DP_OVERFLOW(1U) + +#define S_TX_DATAPATH_RESTART 7 +#define 
V_TX_DATAPATH_RESTART(x) ((x) << S_TX_DATAPATH_RESTART) +#define F_TX_DATAPATH_RESTART V_TX_DATAPATH_RESTART(1U) + +#define S_RX_DATAPATH_RESTART 6 +#define V_RX_DATAPATH_RESTART(x) ((x) << S_RX_DATAPATH_RESTART) +#define F_RX_DATAPATH_RESTART V_RX_DATAPATH_RESTART(1U) + +#define S_MARKER_CHECK_RESTART 5 +#define V_MARKER_CHECK_RESTART(x) ((x) << S_MARKER_CHECK_RESTART) +#define F_MARKER_CHECK_RESTART V_MARKER_CHECK_RESTART(1U) + +#define S_FEC_ALIGN_STATUS_LL 4 +#define V_FEC_ALIGN_STATUS_LL(x) ((x) << S_FEC_ALIGN_STATUS_LL) +#define F_FEC_ALIGN_STATUS_LL V_FEC_ALIGN_STATUS_LL(1U) + +#define S_AMPS_LOCK 0 +#define M_AMPS_LOCK 0xfU +#define V_AMPS_LOCK(x) ((x) << S_AMPS_LOCK) +#define G_AMPS_LOCK(x) (((x) >> S_AMPS_LOCK) & M_AMPS_LOCK) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_INFO_2 0x2408 +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_REVISION 0x240c + +#define S_RS_FEC_VENDOR_REVISION 0 +#define M_RS_FEC_VENDOR_REVISION 0xffffU +#define V_RS_FEC_VENDOR_REVISION(x) ((x) << S_RS_FEC_VENDOR_REVISION) +#define G_RS_FEC_VENDOR_REVISION(x) (((x) >> S_RS_FEC_VENDOR_REVISION) & M_RS_FEC_VENDOR_REVISION) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_KEY 0x2410 + +#define S_RS_FEC_VENDOR_TX_TEST_KEY 0 +#define M_RS_FEC_VENDOR_TX_TEST_KEY 0xffffU +#define V_RS_FEC_VENDOR_TX_TEST_KEY(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_KEY) +#define G_RS_FEC_VENDOR_TX_TEST_KEY(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_KEY) & M_RS_FEC_VENDOR_TX_TEST_KEY) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_SYMBOLS 0x2414 + +#define S_RS_FEC_VENDOR_TX_TEST_SYMBOLS 0 +#define M_RS_FEC_VENDOR_TX_TEST_SYMBOLS 0xffffU +#define V_RS_FEC_VENDOR_TX_TEST_SYMBOLS(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_SYMBOLS) +#define G_RS_FEC_VENDOR_TX_TEST_SYMBOLS(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_SYMBOLS) & M_RS_FEC_VENDOR_TX_TEST_SYMBOLS) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_PATTERN 0x2418 + +#define S_RS_FEC_VENDOR_TX_TEST_PATTERN 0 +#define M_RS_FEC_VENDOR_TX_TEST_PATTERN 0xffffU +#define 
V_RS_FEC_VENDOR_TX_TEST_PATTERN(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_PATTERN) +#define G_RS_FEC_VENDOR_TX_TEST_PATTERN(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_PATTERN) & M_RS_FEC_VENDOR_TX_TEST_PATTERN) + +#define A_MAC_PORT_MTIP_RS_FEC_VENDOR_TX_TEST_TRIGGER 0x241c + +#define S_RS_FEC_VENDOR_TX_TEST_TRIGGER 0 +#define M_RS_FEC_VENDOR_TX_TEST_TRIGGER 0xffffU +#define V_RS_FEC_VENDOR_TX_TEST_TRIGGER(x) ((x) << S_RS_FEC_VENDOR_TX_TEST_TRIGGER) +#define G_RS_FEC_VENDOR_TX_TEST_TRIGGER(x) (((x) >> S_RS_FEC_VENDOR_TX_TEST_TRIGGER) & M_RS_FEC_VENDOR_TX_TEST_TRIGGER) + #define A_MAC_PORT_FEC_KR_CONTROL 0x2600 #define S_ENABLE_TR 1 @@ -36081,6 +50692,16 @@ #define V_ABILITY(x) ((x) << S_ABILITY) #define F_ABILITY V_ABILITY(1U) +#define A_MAC_PORT_MTIP_FEC_ABILITY 0x2618 + +#define S_BASE_R_FEC_ERROR_INDICATION_ABILITY 1 +#define V_BASE_R_FEC_ERROR_INDICATION_ABILITY(x) ((x) << S_BASE_R_FEC_ERROR_INDICATION_ABILITY) +#define F_BASE_R_FEC_ERROR_INDICATION_ABILITY V_BASE_R_FEC_ERROR_INDICATION_ABILITY(1U) + +#define S_BASE_R_FEC_ABILITY 0 +#define V_BASE_R_FEC_ABILITY(x) ((x) << S_BASE_R_FEC_ABILITY) +#define F_BASE_R_FEC_ABILITY V_BASE_R_FEC_ABILITY(1U) + #define A_MAC_PORT_FEC_CONTROL 0x261c #define S_FEC_EN_ERR_IND 1 @@ -36101,6 +50722,11 @@ #define V_FEC_LOCKED(x) ((x) << S_FEC_LOCKED) #define F_FEC_LOCKED V_FEC_LOCKED(1U) +#define S_FEC_LOCKED0 1 +#define M_FEC_LOCKED0 0xfU +#define V_FEC_LOCKED0(x) ((x) << S_FEC_LOCKED0) +#define G_FEC_LOCKED0(x) (((x) >> S_FEC_LOCKED0) & M_FEC_LOCKED0) + #define A_MAC_PORT_FEC_CERR_CNT_0 0x2624 #define S_FEC_CERR_CNT_0 0 @@ -36108,6 +50734,7 @@ #define V_FEC_CERR_CNT_0(x) ((x) << S_FEC_CERR_CNT_0) #define G_FEC_CERR_CNT_0(x) (((x) >> S_FEC_CERR_CNT_0) & M_FEC_CERR_CNT_0) +#define A_MAC_PORT_MTIP_FEC0_CERR_CNT_0 0x2624 #define A_MAC_PORT_FEC_CERR_CNT_1 0x2628 #define S_FEC_CERR_CNT_1 0 @@ -36115,6 +50742,7 @@ #define V_FEC_CERR_CNT_1(x) ((x) << S_FEC_CERR_CNT_1) #define G_FEC_CERR_CNT_1(x) (((x) >> S_FEC_CERR_CNT_1) & M_FEC_CERR_CNT_1) 
+#define A_MAC_PORT_MTIP_FEC0_CERR_CNT_1 0x2628 #define A_MAC_PORT_FEC_NCERR_CNT_0 0x262c #define S_FEC_NCERR_CNT_0 0 @@ -36122,6 +50750,13 @@ #define V_FEC_NCERR_CNT_0(x) ((x) << S_FEC_NCERR_CNT_0) #define G_FEC_NCERR_CNT_0(x) (((x) >> S_FEC_NCERR_CNT_0) & M_FEC_NCERR_CNT_0) +#define A_MAC_PORT_MTIP_FEC0_NCERR_CNT_0 0x262c + +#define S_FEC0_NCERR_CNT_0 0 +#define M_FEC0_NCERR_CNT_0 0xffffU +#define V_FEC0_NCERR_CNT_0(x) ((x) << S_FEC0_NCERR_CNT_0) +#define G_FEC0_NCERR_CNT_0(x) (((x) >> S_FEC0_NCERR_CNT_0) & M_FEC0_NCERR_CNT_0) + #define A_MAC_PORT_FEC_NCERR_CNT_1 0x2630 #define S_FEC_NCERR_CNT_1 0 @@ -36129,6 +50764,28 @@ #define V_FEC_NCERR_CNT_1(x) ((x) << S_FEC_NCERR_CNT_1) #define G_FEC_NCERR_CNT_1(x) (((x) >> S_FEC_NCERR_CNT_1) & M_FEC_NCERR_CNT_1) +#define A_MAC_PORT_MTIP_FEC0_NCERR_CNT_1 0x2630 + +#define S_FEC0_NCERR_CNT_1 0 +#define M_FEC0_NCERR_CNT_1 0xffffU +#define V_FEC0_NCERR_CNT_1(x) ((x) << S_FEC0_NCERR_CNT_1) +#define G_FEC0_NCERR_CNT_1(x) (((x) >> S_FEC0_NCERR_CNT_1) & M_FEC0_NCERR_CNT_1) + +#define A_MAC_PORT_MTIP_FEC_STATUS1 0x2664 +#define A_MAC_PORT_MTIP_FEC1_CERR_CNT_0 0x2668 +#define A_MAC_PORT_MTIP_FEC1_CERR_CNT_1 0x266c +#define A_MAC_PORT_MTIP_FEC1_NCERR_CNT_0 0x2670 +#define A_MAC_PORT_MTIP_FEC1_NCERR_CNT_1 0x2674 +#define A_MAC_PORT_MTIP_FEC_STATUS2 0x26a8 +#define A_MAC_PORT_MTIP_FEC2_CERR_CNT_0 0x26ac +#define A_MAC_PORT_MTIP_FEC2_CERR_CNT_1 0x26b0 +#define A_MAC_PORT_MTIP_FEC2_NCERR_CNT_0 0x26b4 +#define A_MAC_PORT_MTIP_FEC2_NCERR_CNT_1 0x26b8 +#define A_MAC_PORT_MTIP_FEC_STATUS3 0x26ec +#define A_MAC_PORT_MTIP_FEC3_CERR_CNT_0 0x26f0 +#define A_MAC_PORT_MTIP_FEC3_CERR_CNT_1 0x26f4 +#define A_MAC_PORT_MTIP_FEC3_NCERR_CNT_0 0x26f8 +#define A_MAC_PORT_MTIP_FEC3_NCERR_CNT_1 0x26fc #define A_MAC_PORT_AE_RX_COEF_REQ 0x2a00 #define S_T5_RXREQ_C2 4 @@ -36146,6 +50803,11 @@ #define V_T5_RXREQ_C0(x) ((x) << S_T5_RXREQ_C0) #define G_T5_RXREQ_C0(x) (((x) >> S_T5_RXREQ_C0) & M_T5_RXREQ_C0) +#define S_T5_RXREQ_C3 6 +#define M_T5_RXREQ_C3 0x3U 
+#define V_T5_RXREQ_C3(x) ((x) << S_T5_RXREQ_C3) +#define G_T5_RXREQ_C3(x) (((x) >> S_T5_RXREQ_C3) & M_T5_RXREQ_C3) + #define A_MAC_PORT_AE_RX_COEF_STAT 0x2a04 #define S_T5_AE0_RXSTAT_RDY 15 @@ -36167,6 +50829,23 @@ #define V_T5_AE0_RXSTAT_C0(x) ((x) << S_T5_AE0_RXSTAT_C0) #define G_T5_AE0_RXSTAT_C0(x) (((x) >> S_T5_AE0_RXSTAT_C0) & M_T5_AE0_RXSTAT_C0) +#define S_T5_AE0_RXSTAT_LSNA 14 +#define V_T5_AE0_RXSTAT_LSNA(x) ((x) << S_T5_AE0_RXSTAT_LSNA) +#define F_T5_AE0_RXSTAT_LSNA V_T5_AE0_RXSTAT_LSNA(1U) + +#define S_T5_AE0_RXSTAT_FEC 13 +#define V_T5_AE0_RXSTAT_FEC(x) ((x) << S_T5_AE0_RXSTAT_FEC) +#define F_T5_AE0_RXSTAT_FEC V_T5_AE0_RXSTAT_FEC(1U) + +#define S_T5_AE0_RXSTAT_TF 12 +#define V_T5_AE0_RXSTAT_TF(x) ((x) << S_T5_AE0_RXSTAT_TF) +#define F_T5_AE0_RXSTAT_TF V_T5_AE0_RXSTAT_TF(1U) + +#define S_T5_AE0_RXSTAT_C3 6 +#define M_T5_AE0_RXSTAT_C3 0x3U +#define V_T5_AE0_RXSTAT_C3(x) ((x) << S_T5_AE0_RXSTAT_C3) +#define G_T5_AE0_RXSTAT_C3(x) (((x) >> S_T5_AE0_RXSTAT_C3) & M_T5_AE0_RXSTAT_C3) + #define A_MAC_PORT_AE_TX_COEF_REQ 0x2a08 #define S_T5_TXREQ_C2 4 @@ -36184,6 +50863,15 @@ #define V_T5_TXREQ_C0(x) ((x) << S_T5_TXREQ_C0) #define G_T5_TXREQ_C0(x) (((x) >> S_T5_TXREQ_C0) & M_T5_TXREQ_C0) +#define S_TXREQ_FEC 11 +#define V_TXREQ_FEC(x) ((x) << S_TXREQ_FEC) +#define F_TXREQ_FEC V_TXREQ_FEC(1U) + +#define S_T5_TXREQ_C3 6 +#define M_T5_TXREQ_C3 0x3U +#define V_T5_TXREQ_C3(x) ((x) << S_T5_TXREQ_C3) +#define G_T5_TXREQ_C3(x) (((x) >> S_T5_TXREQ_C3) & M_T5_TXREQ_C3) + #define A_MAC_PORT_AE_TX_COEF_STAT 0x2a0c #define S_T5_TXSTAT_C2 4 @@ -36201,6 +50889,11 @@ #define V_T5_TXSTAT_C0(x) ((x) << S_T5_TXSTAT_C0) #define G_T5_TXSTAT_C0(x) (((x) >> S_T5_TXSTAT_C0) & M_T5_TXSTAT_C0) +#define S_T5_TXSTAT_C3 6 +#define M_T5_TXSTAT_C3 0x3U +#define V_T5_TXSTAT_C3(x) ((x) << S_T5_TXSTAT_C3) +#define G_T5_TXSTAT_C3(x) (((x) >> S_T5_TXSTAT_C3) & M_T5_TXSTAT_C3) + #define A_MAC_PORT_AE_REG_MODE 0x2a10 #define S_AET_RSVD 7 @@ -36211,6 +50904,31 @@ #define V_AET_ENABLE(x) ((x) << 
S_AET_ENABLE) #define F_AET_ENABLE V_AET_ENABLE(1U) +#define S_SET_WAIT_TIMER 13 +#define M_SET_WAIT_TIMER 0x3U +#define V_SET_WAIT_TIMER(x) ((x) << S_SET_WAIT_TIMER) +#define G_SET_WAIT_TIMER(x) (((x) >> S_SET_WAIT_TIMER) & M_SET_WAIT_TIMER) + +#define S_C2_C3_STATE_SEL 12 +#define V_C2_C3_STATE_SEL(x) ((x) << S_C2_C3_STATE_SEL) +#define F_C2_C3_STATE_SEL V_C2_C3_STATE_SEL(1U) + +#define S_FFE4_EN 11 +#define V_FFE4_EN(x) ((x) << S_FFE4_EN) +#define F_FFE4_EN V_FFE4_EN(1U) + +#define S_FEC_REQUEST 10 +#define V_FEC_REQUEST(x) ((x) << S_FEC_REQUEST) +#define F_FEC_REQUEST V_FEC_REQUEST(1U) + +#define S_FEC_SUPPORTED 9 +#define V_FEC_SUPPORTED(x) ((x) << S_FEC_SUPPORTED) +#define F_FEC_SUPPORTED V_FEC_SUPPORTED(1U) + +#define S_TX_FIXED 8 +#define V_TX_FIXED(x) ((x) << S_TX_FIXED) +#define F_TX_FIXED V_TX_FIXED(1U) + #define A_MAC_PORT_AE_PRBS_CTL 0x2a14 #define A_MAC_PORT_AE_FSM_CTL 0x2a18 @@ -36241,6 +50959,23 @@ #define V_T5_AE1_RXSTAT_C0(x) ((x) << S_T5_AE1_RXSTAT_C0) #define G_T5_AE1_RXSTAT_C0(x) (((x) >> S_T5_AE1_RXSTAT_C0) & M_T5_AE1_RXSTAT_C0) +#define S_T5_AE1_RXSTAT_LSNA 14 +#define V_T5_AE1_RXSTAT_LSNA(x) ((x) << S_T5_AE1_RXSTAT_LSNA) +#define F_T5_AE1_RXSTAT_LSNA V_T5_AE1_RXSTAT_LSNA(1U) + +#define S_T5_AE1_RXSTAT_FEC 13 +#define V_T5_AE1_RXSTAT_FEC(x) ((x) << S_T5_AE1_RXSTAT_FEC) +#define F_T5_AE1_RXSTAT_FEC V_T5_AE1_RXSTAT_FEC(1U) + +#define S_T5_AE1_RXSTAT_TF 12 +#define V_T5_AE1_RXSTAT_TF(x) ((x) << S_T5_AE1_RXSTAT_TF) +#define F_T5_AE1_RXSTAT_TF V_T5_AE1_RXSTAT_TF(1U) + +#define S_T5_AE1_RXSTAT_C3 6 +#define M_T5_AE1_RXSTAT_C3 0x3U +#define V_T5_AE1_RXSTAT_C3(x) ((x) << S_T5_AE1_RXSTAT_C3) +#define G_T5_AE1_RXSTAT_C3(x) (((x) >> S_T5_AE1_RXSTAT_C3) & M_T5_AE1_RXSTAT_C3) + #define A_MAC_PORT_AE_TX_COEF_REQ_1 0x2a28 #define A_MAC_PORT_AE_TX_COEF_STAT_1 0x2a2c #define A_MAC_PORT_AE_REG_MODE_1 0x2a30 @@ -36269,6 +51004,23 @@ #define V_T5_AE2_RXSTAT_C0(x) ((x) << S_T5_AE2_RXSTAT_C0) #define G_T5_AE2_RXSTAT_C0(x) (((x) >> S_T5_AE2_RXSTAT_C0) & 
M_T5_AE2_RXSTAT_C0) +#define S_T5_AE2_RXSTAT_LSNA 14 +#define V_T5_AE2_RXSTAT_LSNA(x) ((x) << S_T5_AE2_RXSTAT_LSNA) +#define F_T5_AE2_RXSTAT_LSNA V_T5_AE2_RXSTAT_LSNA(1U) + +#define S_T5_AE2_RXSTAT_FEC 13 +#define V_T5_AE2_RXSTAT_FEC(x) ((x) << S_T5_AE2_RXSTAT_FEC) +#define F_T5_AE2_RXSTAT_FEC V_T5_AE2_RXSTAT_FEC(1U) + +#define S_T5_AE2_RXSTAT_TF 12 +#define V_T5_AE2_RXSTAT_TF(x) ((x) << S_T5_AE2_RXSTAT_TF) +#define F_T5_AE2_RXSTAT_TF V_T5_AE2_RXSTAT_TF(1U) + +#define S_T5_AE2_RXSTAT_C3 6 +#define M_T5_AE2_RXSTAT_C3 0x3U +#define V_T5_AE2_RXSTAT_C3(x) ((x) << S_T5_AE2_RXSTAT_C3) +#define G_T5_AE2_RXSTAT_C3(x) (((x) >> S_T5_AE2_RXSTAT_C3) & M_T5_AE2_RXSTAT_C3) + #define A_MAC_PORT_AE_TX_COEF_REQ_2 0x2a48 #define A_MAC_PORT_AE_TX_COEF_STAT_2 0x2a4c #define A_MAC_PORT_AE_REG_MODE_2 0x2a50 @@ -36297,6 +51049,23 @@ #define V_T5_AE3_RXSTAT_C0(x) ((x) << S_T5_AE3_RXSTAT_C0) #define G_T5_AE3_RXSTAT_C0(x) (((x) >> S_T5_AE3_RXSTAT_C0) & M_T5_AE3_RXSTAT_C0) +#define S_T5_AE3_RXSTAT_LSNA 14 +#define V_T5_AE3_RXSTAT_LSNA(x) ((x) << S_T5_AE3_RXSTAT_LSNA) +#define F_T5_AE3_RXSTAT_LSNA V_T5_AE3_RXSTAT_LSNA(1U) + +#define S_T5_AE3_RXSTAT_FEC 13 +#define V_T5_AE3_RXSTAT_FEC(x) ((x) << S_T5_AE3_RXSTAT_FEC) +#define F_T5_AE3_RXSTAT_FEC V_T5_AE3_RXSTAT_FEC(1U) + +#define S_T5_AE3_RXSTAT_TF 12 +#define V_T5_AE3_RXSTAT_TF(x) ((x) << S_T5_AE3_RXSTAT_TF) +#define F_T5_AE3_RXSTAT_TF V_T5_AE3_RXSTAT_TF(1U) + +#define S_T5_AE3_RXSTAT_C3 6 +#define M_T5_AE3_RXSTAT_C3 0x3U +#define V_T5_AE3_RXSTAT_C3(x) ((x) << S_T5_AE3_RXSTAT_C3) +#define G_T5_AE3_RXSTAT_C3(x) (((x) >> S_T5_AE3_RXSTAT_C3) & M_T5_AE3_RXSTAT_C3) + #define A_MAC_PORT_AE_TX_COEF_REQ_3 0x2a68 #define A_MAC_PORT_AE_TX_COEF_STAT_3 0x2a6c #define A_MAC_PORT_AE_REG_MODE_3 0x2a70 @@ -36360,6 +51129,20 @@ #define V_H1TEQ_GOAL(x) ((x) << S_H1TEQ_GOAL) #define G_H1TEQ_GOAL(x) (((x) >> S_H1TEQ_GOAL) & M_H1TEQ_GOAL) +#define S_T6_INIT_METH 12 +#define M_T6_INIT_METH 0xfU +#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH) +#define 
G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH) + +#define S_INIT_CNT 8 +#define M_INIT_CNT 0xfU +#define V_INIT_CNT(x) ((x) << S_INIT_CNT) +#define G_INIT_CNT(x) (((x) >> S_INIT_CNT) & M_INIT_CNT) + +#define S_EN_AI_N0 5 +#define V_EN_AI_N0(x) ((x) << S_EN_AI_N0) +#define F_EN_AI_N0 V_EN_AI_N0(1U) + #define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_0 0x2b04 #define S_GAIN_TH 6 @@ -36380,6 +51163,23 @@ #define V_AMIN_TH(x) ((x) << S_AMIN_TH) #define G_AMIN_TH(x) (((x) >> S_AMIN_TH) & M_AMIN_TH) +#define S_FEC_CNV 15 +#define V_FEC_CNV(x) ((x) << S_FEC_CNV) +#define F_FEC_CNV V_FEC_CNV(1U) + +#define S_EN_RETRY 14 +#define V_EN_RETRY(x) ((x) << S_EN_RETRY) +#define F_EN_RETRY V_EN_RETRY(1U) + +#define S_DPC_METH 12 +#define M_DPC_METH 0x3U +#define V_DPC_METH(x) ((x) << S_DPC_METH) +#define G_DPC_METH(x) (((x) >> S_DPC_METH) & M_DPC_METH) + +#define S_EN_P2 11 +#define V_EN_P2(x) ((x) << S_EN_P2) +#define F_EN_P2 V_EN_P2(1U) + #define A_MAC_PORT_AET_ZFE_LIMITS_0 0x2b08 #define S_ACC_LIM 8 @@ -36423,6 +51223,11 @@ #define V_BOOT_DEC_C0(x) ((x) << S_BOOT_DEC_C0) #define F_BOOT_DEC_C0 V_BOOT_DEC_C0(1U) +#define S_BOOT_LUT5 8 +#define M_BOOT_LUT5 0xfU +#define V_BOOT_LUT5(x) ((x) << S_BOOT_LUT5) +#define G_BOOT_LUT5(x) (((x) >> S_BOOT_LUT5) & M_BOOT_LUT5) + #define A_MAC_PORT_AET_STATUS_0 0x2b10 #define S_AET_STAT 9 @@ -36440,21 +51245,146 @@ #define V_CTRL_STATE(x) ((x) << S_CTRL_STATE) #define G_CTRL_STATE(x) (((x) >> S_CTRL_STATE) & M_CTRL_STATE) +#define S_CTRL_STAT 8 +#define M_CTRL_STAT 0x1fU +#define V_CTRL_STAT(x) ((x) << S_CTRL_STAT) +#define G_CTRL_STAT(x) (((x) >> S_CTRL_STAT) & M_CTRL_STAT) + +#define S_T6_NEU_STATE 4 +#define M_T6_NEU_STATE 0xfU +#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE) +#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE) + +#define S_T6_CTRL_STATE 0 +#define M_T6_CTRL_STATE 0xfU +#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE) +#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE) + 
+#define A_MAC_PORT_AET_STATUS_20 0x2b14 + +#define S_FRAME_LOCK_CNT 0 +#define M_FRAME_LOCK_CNT 0x7U +#define V_FRAME_LOCK_CNT(x) ((x) << S_FRAME_LOCK_CNT) +#define G_FRAME_LOCK_CNT(x) (((x) >> S_FRAME_LOCK_CNT) & M_FRAME_LOCK_CNT) + +#define A_MAC_PORT_AET_LIMITS0 0x2b18 + +#define S_DPC_TIME_LIM 0 +#define M_DPC_TIME_LIM 0x3U +#define V_DPC_TIME_LIM(x) ((x) << S_DPC_TIME_LIM) +#define G_DPC_TIME_LIM(x) (((x) >> S_DPC_TIME_LIM) & M_DPC_TIME_LIM) + #define A_MAC_PORT_AET_STAGE_CONFIGURATION_1 0x2b20 + +#define S_T6_INIT_METH 12 +#define M_T6_INIT_METH 0xfU +#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH) +#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH) + #define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_1 0x2b24 #define A_MAC_PORT_AET_ZFE_LIMITS_1 0x2b28 #define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_1 0x2b2c #define A_MAC_PORT_AET_STATUS_1 0x2b30 + +#define S_T6_NEU_STATE 4 +#define M_T6_NEU_STATE 0xfU +#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE) +#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE) + +#define S_T6_CTRL_STATE 0 +#define M_T6_CTRL_STATE 0xfU +#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE) +#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE) + +#define A_MAC_PORT_AET_STATUS_21 0x2b34 +#define A_MAC_PORT_AET_LIMITS1 0x2b38 #define A_MAC_PORT_AET_STAGE_CONFIGURATION_2 0x2b40 + +#define S_T6_INIT_METH 12 +#define M_T6_INIT_METH 0xfU +#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH) +#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH) + #define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_2 0x2b44 #define A_MAC_PORT_AET_ZFE_LIMITS_2 0x2b48 #define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_2 0x2b4c #define A_MAC_PORT_AET_STATUS_2 0x2b50 + +#define S_T6_NEU_STATE 4 +#define M_T6_NEU_STATE 0xfU +#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE) +#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE) + +#define S_T6_CTRL_STATE 0 +#define M_T6_CTRL_STATE 0xfU +#define 
V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE) +#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE) + +#define A_MAC_PORT_AET_STATUS_22 0x2b54 +#define A_MAC_PORT_AET_LIMITS2 0x2b58 #define A_MAC_PORT_AET_STAGE_CONFIGURATION_3 0x2b60 + +#define S_T6_INIT_METH 12 +#define M_T6_INIT_METH 0xfU +#define V_T6_INIT_METH(x) ((x) << S_T6_INIT_METH) +#define G_T6_INIT_METH(x) (((x) >> S_T6_INIT_METH) & M_T6_INIT_METH) + #define A_MAC_PORT_AET_SIGNAL_LOSS_DETECTION_3 0x2b64 #define A_MAC_PORT_AET_ZFE_LIMITS_3 0x2b68 #define A_MAC_PORT_AET_BOOTSTRAP_LOOKUP_TABLE_3 0x2b6c #define A_MAC_PORT_AET_STATUS_3 0x2b70 + +#define S_T6_NEU_STATE 4 +#define M_T6_NEU_STATE 0xfU +#define V_T6_NEU_STATE(x) ((x) << S_T6_NEU_STATE) +#define G_T6_NEU_STATE(x) (((x) >> S_T6_NEU_STATE) & M_T6_NEU_STATE) + +#define S_T6_CTRL_STATE 0 +#define M_T6_CTRL_STATE 0xfU +#define V_T6_CTRL_STATE(x) ((x) << S_T6_CTRL_STATE) +#define G_T6_CTRL_STATE(x) (((x) >> S_T6_CTRL_STATE) & M_T6_CTRL_STATE) + +#define A_MAC_PORT_AET_STATUS_23 0x2b74 +#define A_MAC_PORT_AET_LIMITS3 0x2b78 +#define A_T6_MAC_PORT_BEAN_CTL 0x2c00 +#define A_T6_MAC_PORT_BEAN_STATUS 0x2c04 +#define A_T6_MAC_PORT_BEAN_ABILITY_0 0x2c08 + +#define S_BEAN_REM_FAULT 13 +#define V_BEAN_REM_FAULT(x) ((x) << S_BEAN_REM_FAULT) +#define F_BEAN_REM_FAULT V_BEAN_REM_FAULT(1U) + +#define A_T6_MAC_PORT_BEAN_ABILITY_1 0x2c0c +#define A_T6_MAC_PORT_BEAN_ABILITY_2 0x2c10 +#define A_T6_MAC_PORT_BEAN_REM_ABILITY_0 0x2c14 + +#define S_BEAN_ABL_REM_FAULT 13 +#define V_BEAN_ABL_REM_FAULT(x) ((x) << S_BEAN_ABL_REM_FAULT) +#define F_BEAN_ABL_REM_FAULT V_BEAN_ABL_REM_FAULT(1U) + +#define A_T6_MAC_PORT_BEAN_REM_ABILITY_1 0x2c18 +#define A_T6_MAC_PORT_BEAN_REM_ABILITY_2 0x2c1c +#define A_T6_MAC_PORT_BEAN_MS_COUNT 0x2c20 +#define A_T6_MAC_PORT_BEAN_XNP_0 0x2c24 +#define A_T6_MAC_PORT_BEAN_XNP_1 0x2c28 +#define A_T6_MAC_PORT_BEAN_XNP_2 0x2c2c +#define A_T6_MAC_PORT_LP_BEAN_XNP_0 0x2c30 +#define A_T6_MAC_PORT_LP_BEAN_XNP_1 0x2c34 +#define 
A_T6_MAC_PORT_LP_BEAN_XNP_2 0x2c38 +#define A_T6_MAC_PORT_BEAN_ETH_STATUS 0x2c3c + +#define S_100GCR4 11 +#define V_100GCR4(x) ((x) << S_100GCR4) +#define F_100GCR4 V_100GCR4(1U) + +#define S_100GKR4 10 +#define V_100GKR4(x) ((x) << S_100GKR4) +#define F_100GKR4 V_100GKR4(1U) + +#define S_100GKP4 9 +#define V_100GKP4(x) ((x) << S_100GKP4) +#define F_100GKP4 V_100GKP4(1U) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_CONFIGURATION_MODE 0x3000 #define S_T5_TX_LINKEN 15 @@ -36512,6 +51442,19 @@ #define V_T5_TX_RTSEL(x) ((x) << S_T5_TX_RTSEL) #define G_T5_TX_RTSEL(x) (((x) >> S_T5_TX_RTSEL) & M_T5_TX_RTSEL) +#define S_T6_T5_TX_RXLOOP 5 +#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP) +#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U) + +#define S_T5_TX_ENFFE4 4 +#define V_T5_TX_ENFFE4(x) ((x) << S_T5_TX_ENFFE4) +#define F_T5_TX_ENFFE4 V_T5_TX_ENFFE4(1U) + +#define S_T6_T5_TX_BWSEL 2 +#define M_T6_T5_TX_BWSEL 0x3U +#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL) +#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TEST_CONTROL 0x3004 #define S_SPSEL 11 @@ -36527,6 +51470,22 @@ #define V_TPGMD(x) ((x) << S_TPGMD) #define F_TPGMD V_TPGMD(1U) +#define S_TC_FRCERR 10 +#define V_TC_FRCERR(x) ((x) << S_TC_FRCERR) +#define F_TC_FRCERR V_TC_FRCERR(1U) + +#define S_T6_ERROR 9 +#define V_T6_ERROR(x) ((x) << S_T6_ERROR) +#define F_T6_ERROR V_T6_ERROR(1U) + +#define S_SYNC 8 +#define V_SYNC(x) ((x) << S_SYNC) +#define F_SYNC V_SYNC(1U) + +#define S_P7CHK 5 +#define V_P7CHK(x) ((x) << S_P7CHK) +#define F_P7CHK V_P7CHK(1U) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_COEFFICIENT_CONTROL 0x3008 #define S_ZCALOVRD 8 @@ -36545,6 +51504,10 @@ #define V_AESRC(x) ((x) << S_AESRC) #define F_AESRC V_AESRC(1U) +#define S_SASMODE 7 +#define V_SASMODE(x) ((x) << S_SASMODE) +#define F_SASMODE V_SASMODE(1U) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_DRIVER_MODE_CONTROL 0x300c #define S_T5DRVHIZ 5 @@ -36614,6 +51577,16 @@ #define 
V_CALSSTP(x) ((x) << S_CALSSTP) #define G_CALSSTP(x) (((x) >> S_CALSSTP) & M_CALSSTP) +#define S_T6_CALSSTN 8 +#define M_T6_CALSSTN 0x3fU +#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN) +#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN) + +#define S_T6_CALSSTP 0 +#define M_T6_CALSSTP 0x3fU +#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP) +#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x301c #define S_DRTOL 0 @@ -36621,6 +51594,11 @@ #define V_DRTOL(x) ((x) << S_DRTOL) #define G_DRTOL(x) (((x) >> S_DRTOL) & M_DRTOL) +#define S_T6_DRTOL 2 +#define M_T6_DRTOL 0x7U +#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL) +#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT 0x3020 #define S_T5NXTT0 0 @@ -36628,6 +51606,11 @@ #define V_T5NXTT0(x) ((x) << S_T5NXTT0) #define G_T5NXTT0(x) (((x) >> S_T5NXTT0) & M_T5NXTT0) +#define S_T6_NXTT0 0 +#define M_T6_NXTT0 0x3fU +#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0) +#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT 0x3024 #define S_T5NXTT1 0 @@ -36642,6 +51625,18 @@ #define V_T5NXTT2(x) ((x) << S_T5NXTT2) #define G_T5NXTT2(x) (((x) >> S_T5NXTT2) & M_T5NXTT2) +#define S_T6_NXTT2 0 +#define M_T6_NXTT2 0x3fU +#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2) +#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_3_COEFFICIENT 0x302c + +#define S_NXTT3 0 +#define M_NXTT3 0x3fU +#define V_NXTT3(x) ((x) << S_NXTT3) +#define G_NXTT3(x) (((x) >> S_NXTT3) & M_NXTT3) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_AMPLITUDE 0x3030 #define S_T5TXPWR 0 @@ -36656,6 +51651,11 @@ #define V_NXTPOL(x) ((x) << S_NXTPOL) #define G_NXTPOL(x) (((x) >> S_NXTPOL) & M_NXTPOL) +#define S_T6_NXTPOL 0 +#define M_T6_NXTPOL 0xfU +#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL) +#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL) 
+ #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3038 #define S_CPREST 13 @@ -36666,8 +51666,61 @@ #define V_CINIT(x) ((x) << S_CINIT) #define F_CINIT V_CINIT(1U) +#define S_SASCMD 10 +#define M_SASCMD 0x3U +#define V_SASCMD(x) ((x) << S_SASCMD) +#define G_SASCMD(x) (((x) >> S_SASCMD) & M_SASCMD) + +#define S_T6_C0UPDT 6 +#define M_T6_C0UPDT 0x3U +#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT) +#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT) + +#define S_C3UPDT 4 +#define M_C3UPDT 0x3U +#define V_C3UPDT(x) ((x) << S_C3UPDT) +#define G_C3UPDT(x) (((x) >> S_C3UPDT) & M_C3UPDT) + +#define S_T6_C2UPDT 2 +#define M_T6_C2UPDT 0x3U +#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT) +#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT) + +#define S_T6_C1UPDT 0 +#define M_T6_C1UPDT 0x3U +#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT) +#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x303c + +#define S_T6_C0STAT 6 +#define M_T6_C0STAT 0x3U +#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT) +#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT) + +#define S_C3STAT 4 +#define M_C3STAT 0x3U +#define V_C3STAT(x) ((x) << S_C3STAT) +#define G_C3STAT(x) (((x) >> S_C3STAT) & M_C3STAT) + +#define S_T6_C2STAT 2 +#define M_T6_C2STAT 0x3U +#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT) +#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT) + +#define S_T6_C1STAT 0 +#define M_T6_C1STAT 0x3U +#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT) +#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3040 +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3040 + +#define S_AETAP0 0 +#define M_AETAP0 0x7fU +#define V_AETAP0(x) ((x) << S_AETAP0) +#define G_AETAP0(x) (((x) >> S_AETAP0) & M_AETAP0) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3044 #define 
S_T5NIDAC1 0 @@ -36675,6 +51728,13 @@ #define V_T5NIDAC1(x) ((x) << S_T5NIDAC1) #define G_T5NIDAC1(x) (((x) >> S_T5NIDAC1) & M_T5NIDAC1) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3044 + +#define S_AETAP1 0 +#define M_AETAP1 0x7fU +#define V_AETAP1(x) ((x) << S_AETAP1) +#define G_AETAP1(x) (((x) >> S_AETAP1) & M_AETAP1) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3048 #define S_T5NIDAC2 0 @@ -36682,7 +51742,81 @@ #define V_T5NIDAC2(x) ((x) << S_T5NIDAC2) #define G_T5NIDAC2(x) (((x) >> S_T5NIDAC2) & M_T5NIDAC2) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3048 + +#define S_AETAP2 0 +#define M_AETAP2 0x7fU +#define V_AETAP2(x) ((x) << S_AETAP2) +#define G_AETAP2(x) (((x) >> S_AETAP2) & M_AETAP2) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x304c + +#define S_AETAP3 0 +#define M_AETAP3 0x7fU +#define V_AETAP3(x) ((x) << S_AETAP3) +#define G_AETAP3(x) (((x) >> S_AETAP3) & M_AETAP3) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_APPLIED_TUNE_REGISTER 0x3050 + +#define S_ATUNEN 8 +#define M_ATUNEN 0xffU +#define V_ATUNEN(x) ((x) << S_ATUNEN) +#define G_ATUNEN(x) (((x) >> S_ATUNEN) & M_ATUNEN) + +#define S_ATUNEP 0 +#define M_ATUNEP 0xffU +#define V_ATUNEP(x) ((x) << S_ATUNEP) +#define G_ATUNEP(x) (((x) >> S_ATUNEP) & M_ATUNEP) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3058 + +#define S_DCCCOMPINV 8 +#define V_DCCCOMPINV(x) ((x) << S_DCCCOMPINV) +#define F_DCCCOMPINV V_DCCCOMPINV(1U) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3060 +#define A_MAC_PORT_TX_LINKA_TRANSMIT_4X_SEGMENT_APPLIED 0x3060 + +#define S_AS4X7 14 +#define M_AS4X7 0x3U +#define V_AS4X7(x) ((x) << S_AS4X7) +#define G_AS4X7(x) (((x) >> S_AS4X7) & M_AS4X7) + +#define S_AS4X6 12 +#define M_AS4X6 0x3U +#define V_AS4X6(x) ((x) << S_AS4X6) +#define G_AS4X6(x) (((x) >> S_AS4X6) & M_AS4X6) + +#define S_AS4X5 10 +#define M_AS4X5 0x3U +#define V_AS4X5(x) ((x) << S_AS4X5) 
+#define G_AS4X5(x) (((x) >> S_AS4X5) & M_AS4X5) + +#define S_AS4X4 8 +#define M_AS4X4 0x3U +#define V_AS4X4(x) ((x) << S_AS4X4) +#define G_AS4X4(x) (((x) >> S_AS4X4) & M_AS4X4) + +#define S_AS4X3 6 +#define M_AS4X3 0x3U +#define V_AS4X3(x) ((x) << S_AS4X3) +#define G_AS4X3(x) (((x) >> S_AS4X3) & M_AS4X3) + +#define S_AS4X2 4 +#define M_AS4X2 0x3U +#define V_AS4X2(x) ((x) << S_AS4X2) +#define G_AS4X2(x) (((x) >> S_AS4X2) & M_AS4X2) + +#define S_AS4X1 2 +#define M_AS4X1 0x3U +#define V_AS4X1(x) ((x) << S_AS4X1) +#define G_AS4X1(x) (((x) >> S_AS4X1) & M_AS4X1) + +#define S_AS4X0 0 +#define M_AS4X0 0x3U +#define V_AS4X0(x) ((x) << S_AS4X0) +#define G_AS4X0(x) (((x) >> S_AS4X0) & M_AS4X0) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3064 #define S_T5AIDAC1 0 @@ -36690,7 +51824,78 @@ #define V_T5AIDAC1(x) ((x) << S_T5AIDAC1) #define G_T5AIDAC1(x) (((x) >> S_T5AIDAC1) & M_T5AIDAC1) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_2X_SEGMENT_APPLIED 0x3064 + +#define S_AS2X3 6 +#define M_AS2X3 0x3U +#define V_AS2X3(x) ((x) << S_AS2X3) +#define G_AS2X3(x) (((x) >> S_AS2X3) & M_AS2X3) + +#define S_AS2X2 4 +#define M_AS2X2 0x3U +#define V_AS2X2(x) ((x) << S_AS2X2) +#define G_AS2X2(x) (((x) >> S_AS2X2) & M_AS2X2) + +#define S_AS2X1 2 +#define M_AS2X1 0x3U +#define V_AS2X1(x) ((x) << S_AS2X1) +#define G_AS2X1(x) (((x) >> S_AS2X1) & M_AS2X1) + +#define S_AS2X0 0 +#define M_AS2X0 0x3U +#define V_AS2X0(x) ((x) << S_AS2X0) +#define G_AS2X0(x) (((x) >> S_AS2X0) & M_AS2X0) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3068 +#define A_MAC_PORT_TX_LINKA_TRANSMIT_1X_SEGMENT_APPLIED 0x3068 + +#define S_AS1X7 14 +#define M_AS1X7 0x3U +#define V_AS1X7(x) ((x) << S_AS1X7) +#define G_AS1X7(x) (((x) >> S_AS1X7) & M_AS1X7) + +#define S_AS1X6 12 +#define M_AS1X6 0x3U +#define V_AS1X6(x) ((x) << S_AS1X6) +#define G_AS1X6(x) (((x) >> S_AS1X6) & M_AS1X6) + +#define S_AS1X5 10 +#define M_AS1X5 0x3U +#define V_AS1X5(x) ((x) << S_AS1X5) +#define G_AS1X5(x) (((x) >> 
S_AS1X5) & M_AS1X5) + +#define S_AS1X4 8 +#define M_AS1X4 0x3U +#define V_AS1X4(x) ((x) << S_AS1X4) +#define G_AS1X4(x) (((x) >> S_AS1X4) & M_AS1X4) + +#define S_AS1X3 6 +#define M_AS1X3 0x3U +#define V_AS1X3(x) ((x) << S_AS1X3) +#define G_AS1X3(x) (((x) >> S_AS1X3) & M_AS1X3) + +#define S_AS1X2 4 +#define M_AS1X2 0x3U +#define V_AS1X2(x) ((x) << S_AS1X2) +#define G_AS1X2(x) (((x) >> S_AS1X2) & M_AS1X2) + +#define S_AS1X1 2 +#define M_AS1X1 0x3U +#define V_AS1X1(x) ((x) << S_AS1X1) +#define G_AS1X1(x) (((x) >> S_AS1X1) & M_AS1X1) + +#define S_AS1X0 0 +#define M_AS1X0 0x3U +#define V_AS1X0(x) ((x) << S_AS1X0) +#define G_AS1X0(x) (((x) >> S_AS1X0) & M_AS1X0) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x306c + +#define S_AT4X 0 +#define M_AT4X 0xffU +#define V_AT4X(x) ((x) << S_AT4X) +#define G_AT4X(x) (((x) >> S_AT4X) & M_AT4X) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3070 #define S_MAINSC 6 @@ -36703,6 +51908,13 @@ #define V_POSTSC(x) ((x) << S_POSTSC) #define G_POSTSC(x) (((x) >> S_POSTSC) & M_POSTSC) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3070 + +#define S_AT2X 8 +#define M_AT2X 0xfU +#define V_AT2X(x) ((x) << S_AT2X) +#define G_AT2X(x) (((x) >> S_AT2X) & M_AT2X) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3074 #define S_PRESC 0 @@ -36710,6 +51922,13 @@ #define V_PRESC(x) ((x) << S_PRESC) #define G_PRESC(x) (((x) >> S_PRESC) & M_PRESC) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3074 + +#define S_ATSIGN 0 +#define M_ATSIGN 0xfU +#define V_ATSIGN(x) ((x) << S_ATSIGN) +#define G_ATSIGN(x) (((x) >> S_ATSIGN) & M_ATSIGN) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3078 #define A_MAC_PORT_TX_LINKA_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x307c @@ -36722,6 +51941,11 @@ #define V_T5XWR(x) ((x) << S_T5XWR) #define F_T5XWR V_T5XWR(1U) +#define S_T6_XADDR 1 +#define M_T6_XADDR 0x1fU +#define V_T6_XADDR(x) ((x) << S_T6_XADDR) +#define 
G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3080 #define S_XDAT10 0 @@ -36743,6 +51967,13 @@ #define V_XDAT4(x) ((x) << S_XDAT4) #define G_XDAT4(x) (((x) >> S_XDAT4) & M_XDAT4) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3088 + +#define S_XDAT54 0 +#define M_XDAT54 0xffffU +#define V_XDAT54(x) ((x) << S_XDAT54) +#define G_XDAT54(x) (((x) >> S_XDAT54) & M_XDAT54) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_CONTROL 0x308c #define S_DCCTIMEDOUT 15 @@ -36776,6 +52007,13 @@ #define V_DCCAEN(x) ((x) << S_DCCAEN) #define F_DCCAEN V_DCCAEN(1U) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x308c + +#define S_XDAT76 0 +#define M_XDAT76 0xffffU +#define V_XDAT76(x) ((x) << S_XDAT76) +#define G_XDAT76(x) (((x) >> S_XDAT76) & M_XDAT76) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_DCC_OVERRIDE 0x3090 #define S_DCCOUT 12 @@ -36839,6 +52077,203 @@ #define V_LPIPRCD(x) ((x) << S_LPIPRCD) #define G_LPIPRCD(x) (((x) >> S_LPIPRCD) & M_LPIPRCD) +#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_CONTROL 0x30a0 + +#define S_T6_DCCTIMEEN 13 +#define M_T6_DCCTIMEEN 0x3U +#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN) +#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN) + +#define S_T6_DCCLOCK 11 +#define M_T6_DCCLOCK 0x3U +#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK) +#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK) + +#define S_T6_DCCOFFSET 8 +#define M_T6_DCCOFFSET 0x7U +#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET) +#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET) + +#define S_TX_LINKA_DCCSTEP_CTL 6 +#define M_TX_LINKA_DCCSTEP_CTL 0x3U +#define V_TX_LINKA_DCCSTEP_CTL(x) ((x) << S_TX_LINKA_DCCSTEP_CTL) +#define G_TX_LINKA_DCCSTEP_CTL(x) (((x) >> S_TX_LINKA_DCCSTEP_CTL) & M_TX_LINKA_DCCSTEP_CTL) + +#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_OVERRIDE 0x30a4 +#define A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_APPLIED 0x30a8 +#define 
A_T6_MAC_PORT_TX_LINKA_TRANSMIT_DCC_TIME_OUT 0x30ac +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SIGN_OVERRIDE 0x30c0 + +#define S_OSIGN 0 +#define M_OSIGN 0xfU +#define V_OSIGN(x) ((x) << S_OSIGN) +#define G_OSIGN(x) (((x) >> S_OSIGN) & M_OSIGN) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_4X_OVERRIDE 0x30c8 + +#define S_OS4X7 14 +#define M_OS4X7 0x3U +#define V_OS4X7(x) ((x) << S_OS4X7) +#define G_OS4X7(x) (((x) >> S_OS4X7) & M_OS4X7) + +#define S_OS4X6 12 +#define M_OS4X6 0x3U +#define V_OS4X6(x) ((x) << S_OS4X6) +#define G_OS4X6(x) (((x) >> S_OS4X6) & M_OS4X6) + +#define S_OS4X5 10 +#define M_OS4X5 0x3U +#define V_OS4X5(x) ((x) << S_OS4X5) +#define G_OS4X5(x) (((x) >> S_OS4X5) & M_OS4X5) + +#define S_OS4X4 8 +#define M_OS4X4 0x3U +#define V_OS4X4(x) ((x) << S_OS4X4) +#define G_OS4X4(x) (((x) >> S_OS4X4) & M_OS4X4) + +#define S_OS4X3 6 +#define M_OS4X3 0x3U +#define V_OS4X3(x) ((x) << S_OS4X3) +#define G_OS4X3(x) (((x) >> S_OS4X3) & M_OS4X3) + +#define S_OS4X2 4 +#define M_OS4X2 0x3U +#define V_OS4X2(x) ((x) << S_OS4X2) +#define G_OS4X2(x) (((x) >> S_OS4X2) & M_OS4X2) + +#define S_OS4X1 2 +#define M_OS4X1 0x3U +#define V_OS4X1(x) ((x) << S_OS4X1) +#define G_OS4X1(x) (((x) >> S_OS4X1) & M_OS4X1) + +#define S_OS4X0 0 +#define M_OS4X0 0x3U +#define V_OS4X0(x) ((x) << S_OS4X0) +#define G_OS4X0(x) (((x) >> S_OS4X0) & M_OS4X0) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_2X_OVERRIDE 0x30cc + +#define S_OS2X3 6 +#define M_OS2X3 0x3U +#define V_OS2X3(x) ((x) << S_OS2X3) +#define G_OS2X3(x) (((x) >> S_OS2X3) & M_OS2X3) + +#define S_OS2X2 4 +#define M_OS2X2 0x3U +#define V_OS2X2(x) ((x) << S_OS2X2) +#define G_OS2X2(x) (((x) >> S_OS2X2) & M_OS2X2) + +#define S_OS2X1 2 +#define M_OS2X1 0x3U +#define V_OS2X1(x) ((x) << S_OS2X1) +#define G_OS2X1(x) (((x) >> S_OS2X1) & M_OS2X1) + +#define S_OS2X0 0 +#define M_OS2X0 0x3U +#define V_OS2X0(x) ((x) << S_OS2X0) +#define G_OS2X0(x) (((x) >> S_OS2X0) & M_OS2X0) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_SEGMENT_1X_OVERRIDE 0x30d0 + 
+#define S_OS1X7 14 +#define M_OS1X7 0x3U +#define V_OS1X7(x) ((x) << S_OS1X7) +#define G_OS1X7(x) (((x) >> S_OS1X7) & M_OS1X7) + +#define S_OS1X6 12 +#define M_OS1X6 0x3U +#define V_OS1X6(x) ((x) << S_OS1X6) +#define G_OS1X6(x) (((x) >> S_OS1X6) & M_OS1X6) + +#define S_OS1X5 10 +#define M_OS1X5 0x3U +#define V_OS1X5(x) ((x) << S_OS1X5) +#define G_OS1X5(x) (((x) >> S_OS1X5) & M_OS1X5) + +#define S_OS1X4 8 +#define M_OS1X4 0x3U +#define V_OS1X4(x) ((x) << S_OS1X4) +#define G_OS1X4(x) (((x) >> S_OS1X4) & M_OS1X4) + +#define S_OS1X3 6 +#define M_OS1X3 0x3U +#define V_OS1X3(x) ((x) << S_OS1X3) +#define G_OS1X3(x) (((x) >> S_OS1X3) & M_OS1X3) + +#define S_OS1X2 4 +#define M_OS1X2 0x3U +#define V_OS1X2(x) ((x) << S_OS1X2) +#define G_OS1X2(x) (((x) >> S_OS1X2) & M_OS1X2) + +#define S_OS1X1 2 +#define M_OS1X1 0x3U +#define V_OS1X1(x) ((x) << S_OS1X1) +#define G_OS1X1(x) (((x) >> S_OS1X1) & M_OS1X1) + +#define S_OS1X0 0 +#define M_OS1X0 0x3U +#define V_OS1X0(x) ((x) << S_OS1X0) +#define G_OS1X0(x) (((x) >> S_OS1X0) & M_OS1X0) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x30d8 + +#define S_OT4X 0 +#define M_OT4X 0xffU +#define V_OT4X(x) ((x) << S_OT4X) +#define G_OT4X(x) (((x) >> S_OT4X) & M_OT4X) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x30dc + +#define S_OT2X 0 +#define M_OT2X 0xfU +#define V_OT2X(x) ((x) << S_OT2X) +#define G_OT2X(x) (((x) >> S_OT2X) & M_OT2X) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x30e0 + +#define S_OT1X 0 +#define M_OT1X 0xffU +#define V_OT1X(x) ((x) << S_OT1X) +#define G_OT1X(x) (((x) >> S_OT1X) & M_OT1X) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_5 0x30ec + +#define S_ERRORP 15 +#define V_ERRORP(x) ((x) << S_ERRORP) +#define F_ERRORP V_ERRORP(1U) + +#define S_ERRORN 14 +#define V_ERRORN(x) ((x) << S_ERRORN) +#define F_ERRORN V_ERRORN(1U) + +#define S_TESTENA 13 +#define V_TESTENA(x) ((x) << S_TESTENA) +#define F_TESTENA 
V_TESTENA(1U) + +#define S_TUNEBIT 10 +#define M_TUNEBIT 0x7U +#define V_TUNEBIT(x) ((x) << S_TUNEBIT) +#define G_TUNEBIT(x) (((x) >> S_TUNEBIT) & M_TUNEBIT) + +#define S_DATAPOS 8 +#define M_DATAPOS 0x3U +#define V_DATAPOS(x) ((x) << S_DATAPOS) +#define G_DATAPOS(x) (((x) >> S_DATAPOS) & M_DATAPOS) + +#define S_SEGSEL 3 +#define M_SEGSEL 0x1fU +#define V_SEGSEL(x) ((x) << S_SEGSEL) +#define G_SEGSEL(x) (((x) >> S_SEGSEL) & M_SEGSEL) + +#define S_TAPSEL 1 +#define M_TAPSEL 0x3U +#define V_TAPSEL(x) ((x) << S_TAPSEL) +#define G_TAPSEL(x) (((x) >> S_TAPSEL) & M_TAPSEL) + +#define S_DATASIGN 0 +#define V_DATASIGN(x) ((x) << S_DATASIGN) +#define F_DATASIGN V_DATASIGN(1U) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_4 0x30f0 #define S_SDOVRDEN 8 @@ -36850,6 +52285,11 @@ #define V_SDOVRD(x) ((x) << S_SDOVRD) #define G_SDOVRD(x) (((x) >> S_SDOVRD) & M_SDOVRD) +#define S_T6_SDOVRD 0 +#define M_T6_SDOVRD 0xffffU +#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD) +#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_3 0x30f4 #define S_SLEWCODE 1 @@ -36861,6 +52301,11 @@ #define V_ASEGEN(x) ((x) << S_ASEGEN) #define F_ASEGEN V_ASEGEN(1U) +#define S_WCNT 0 +#define M_WCNT 0x3ffU +#define V_WCNT(x) ((x) << S_WCNT) +#define G_WCNT(x) (((x) >> S_WCNT) & M_WCNT) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_MACRO_TEST_CONTROL_2 0x30f8 #define S_AECMDVAL 14 @@ -36922,43 +52367,202 @@ #define V_OBS(x) ((x) << S_OBS) #define F_OBS V_OBS(1U) +#define S_T6_SDOVRDEN 15 +#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN) +#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U) + +#define S_BSOUTN 7 +#define V_BSOUTN(x) ((x) << S_BSOUTN) +#define F_BSOUTN V_BSOUTN(1U) + +#define S_BSOUTP 6 +#define V_BSOUTP(x) ((x) << S_BSOUTP) +#define F_BSOUTP V_BSOUTP(1U) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_CONFIGURATION_MODE 0x3100 + +#define S_T6_T5_TX_RXLOOP 5 +#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP) +#define F_T6_T5_TX_RXLOOP 
V_T6_T5_TX_RXLOOP(1U) + +#define S_T6_T5_TX_BWSEL 2 +#define M_T6_T5_TX_BWSEL 0x3U +#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL) +#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_TEST_CONTROL 0x3104 + +#define S_T6_ERROR 9 +#define V_T6_ERROR(x) ((x) << S_T6_ERROR) +#define F_T6_ERROR V_T6_ERROR(1U) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_COEFFICIENT_CONTROL 0x3108 #define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_MODE_CONTROL 0x310c #define A_MAC_PORT_TX_LINKB_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3110 #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3114 #define A_MAC_PORT_TX_LINKB_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3118 + +#define S_T6_CALSSTN 8 +#define M_T6_CALSSTN 0x3fU +#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN) +#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN) + +#define S_T6_CALSSTP 0 +#define M_T6_CALSSTP 0x3fU +#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP) +#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x311c + +#define S_T6_DRTOL 2 +#define M_T6_DRTOL 0x7U +#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL) +#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT 0x3120 + +#define S_T6_NXTT0 0 +#define M_T6_NXTT0 0x3fU +#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0) +#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT 0x3124 #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT 0x3128 + +#define S_T6_NXTT2 0 +#define M_T6_NXTT2 0x3fU +#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2) +#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2) + +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_3_COEFFICIENT 0x312c #define A_MAC_PORT_TX_LINKB_TRANSMIT_AMPLITUDE 0x3130 #define A_MAC_PORT_TX_LINKB_TRANSMIT_POLARITY 0x3134 + +#define S_T6_NXTPOL 0 +#define M_T6_NXTPOL 0xfU +#define V_T6_NXTPOL(x) 
((x) << S_T6_NXTPOL) +#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3138 + +#define S_T6_C0UPDT 6 +#define M_T6_C0UPDT 0x3U +#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT) +#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT) + +#define S_T6_C2UPDT 2 +#define M_T6_C2UPDT 0x3U +#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT) +#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT) + +#define S_T6_C1UPDT 0 +#define M_T6_C1UPDT 0x3U +#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT) +#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x313c + +#define S_T6_C0STAT 6 +#define M_T6_C0STAT 0x3U +#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT) +#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT) + +#define S_T6_C2STAT 2 +#define M_T6_C2STAT 0x3U +#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT) +#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT) + +#define S_T6_C1STAT 0 +#define M_T6_C1STAT 0x3U +#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT) +#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3140 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3140 #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3144 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3144 #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3148 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3148 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x314c +#define A_MAC_PORT_TX_LINKB_TRANSMIT_APPLIED_TUNE_REGISTER 0x3150 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3158 #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3160 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_4X_SEGMENT_APPLIED 0x3160 #define 
A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3164 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_2X_SEGMENT_APPLIED 0x3164 #define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3168 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_1X_SEGMENT_APPLIED 0x3168 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x316c #define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3170 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3170 #define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3174 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3174 #define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3178 #define A_MAC_PORT_TX_LINKB_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x317c + +#define S_T6_XADDR 1 +#define M_T6_XADDR 0x1fU +#define V_T6_XADDR(x) ((x) << S_T6_XADDR) +#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3180 #define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3184 #define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3188 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3188 #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x318c +#define A_MAC_PORT_TX_LINKB_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x318c #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_OVERRIDE 0x3190 #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_APPLIED 0x3194 #define A_MAC_PORT_TX_LINKB_TRANSMIT_DCC_TIME_OUT 0x3198 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AZ_CONTROL 0x319c +#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_CONTROL 0x31a0 + +#define S_T6_DCCTIMEEN 13 +#define M_T6_DCCTIMEEN 0x3U +#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN) +#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN) + +#define S_T6_DCCLOCK 11 +#define M_T6_DCCLOCK 0x3U +#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK) +#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK) + +#define S_T6_DCCOFFSET 8 +#define M_T6_DCCOFFSET 
0x7U +#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET) +#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET) + +#define S_TX_LINKB_DCCSTEP_CTL 6 +#define M_TX_LINKB_DCCSTEP_CTL 0x3U +#define V_TX_LINKB_DCCSTEP_CTL(x) ((x) << S_TX_LINKB_DCCSTEP_CTL) +#define G_TX_LINKB_DCCSTEP_CTL(x) (((x) >> S_TX_LINKB_DCCSTEP_CTL) & M_TX_LINKB_DCCSTEP_CTL) + +#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_OVERRIDE 0x31a4 +#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_APPLIED 0x31a8 +#define A_T6_MAC_PORT_TX_LINKB_TRANSMIT_DCC_TIME_OUT 0x31ac +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SIGN_OVERRIDE 0x31c0 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_4X_OVERRIDE 0x31c8 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_2X_OVERRIDE 0x31cc +#define A_MAC_PORT_TX_LINKB_TRANSMIT_SEGMENT_1X_OVERRIDE 0x31d0 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x31d8 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x31dc +#define A_MAC_PORT_TX_LINKB_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x31e0 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_5 0x31ec #define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_4 0x31f0 + +#define S_T6_SDOVRD 0 +#define M_T6_SDOVRD 0xffffU +#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD) +#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_3 0x31f4 #define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_2 0x31f8 #define A_MAC_PORT_TX_LINKB_TRANSMIT_MACRO_TEST_CONTROL_1 0x31fc + +#define S_T6_SDOVRDEN 15 +#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN) +#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U) + #define A_MAC_PORT_RX_LINKA_RECEIVER_CONFIGURATION_MODE 0x3200 #define S_T5_RX_LINKEN 15 @@ -37010,6 +52614,10 @@ #define V_T5_RX_RTSEL(x) ((x) << S_T5_RX_RTSEL) #define G_T5_RX_RTSEL(x) (((x) >> S_T5_RX_RTSEL) & M_T5_RX_RTSEL) +#define S_T5_RX_MODE8023AZ 8 +#define V_T5_RX_MODE8023AZ(x) ((x) << S_T5_RX_MODE8023AZ) +#define F_T5_RX_MODE8023AZ 
V_T5_RX_MODE8023AZ(1U) + #define A_MAC_PORT_RX_LINKA_RECEIVER_TEST_CONTROL 0x3204 #define S_FERRST 10 @@ -37041,6 +52649,20 @@ #define V_PATSEL(x) ((x) << S_PATSEL) #define G_PATSEL(x) (((x) >> S_PATSEL) & M_PATSEL) +#define S_APLYDCD 15 +#define V_APLYDCD(x) ((x) << S_APLYDCD) +#define F_APLYDCD V_APLYDCD(1U) + +#define S_PPOL 13 +#define M_PPOL 0x3U +#define V_PPOL(x) ((x) << S_PPOL) +#define G_PPOL(x) (((x) >> S_PPOL) & M_PPOL) + +#define S_PCLKSEL 11 +#define M_PCLKSEL 0x3U +#define V_PCLKSEL(x) ((x) << S_PCLKSEL) +#define G_PCLKSEL(x) (((x) >> S_PCLKSEL) & M_PCLKSEL) + #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_CONTROL 0x3208 #define S_RSTUCK 3 @@ -37060,6 +52682,30 @@ #define F_SSCEN V_SSCEN(1U) #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_OFFSET_CONTROL 0x320c + +#define S_H1ANOFST 12 +#define M_H1ANOFST 0xfU +#define V_H1ANOFST(x) ((x) << S_H1ANOFST) +#define G_H1ANOFST(x) (((x) >> S_H1ANOFST) & M_H1ANOFST) + +#define S_T6_TMSCAL 8 +#define M_T6_TMSCAL 0x3U +#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL) +#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL) + +#define S_T6_APADJ 7 +#define V_T6_APADJ(x) ((x) << S_T6_APADJ) +#define F_T6_APADJ V_T6_APADJ(1U) + +#define S_T6_RSEL 6 +#define V_T6_RSEL(x) ((x) << S_T6_RSEL) +#define F_T6_RSEL V_T6_RSEL(1U) + +#define S_T6_PHOFFS 0 +#define M_T6_PHOFFS 0x3fU +#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS) +#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS) + #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_POSITION_1 0x3210 #define S_ROT00 0 @@ -37067,6 +52713,16 @@ #define V_ROT00(x) ((x) << S_ROT00) #define G_ROT00(x) (((x) >> S_ROT00) & M_ROT00) +#define S_ROTA 8 +#define M_ROTA 0x3fU +#define V_ROTA(x) ((x) << S_ROTA) +#define G_ROTA(x) (((x) >> S_ROTA) & M_ROTA) + +#define S_ROTD 0 +#define M_ROTD 0x3fU +#define V_ROTD(x) ((x) << S_ROTD) +#define G_ROTD(x) (((x) >> S_ROTD) & M_ROTD) + #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_POSITION_2 0x3214 #define S_FREQFW 8 @@ -37078,7 +52734,23 @@ #define V_FWSNAP(x) 
((x) << S_FWSNAP) #define F_FWSNAP V_FWSNAP(1U) +#define S_ROTE 0 +#define M_ROTE 0x3fU +#define V_ROTE(x) ((x) << S_ROTE) +#define G_ROTE(x) (((x) >> S_ROTE) & M_ROTE) + #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3218 + +#define S_RAOFFF 8 +#define M_RAOFFF 0xfU +#define V_RAOFFF(x) ((x) << S_RAOFFF) +#define G_RAOFFF(x) (((x) >> S_RAOFFF) & M_RAOFFF) + +#define S_RAOFF 0 +#define M_RAOFF 0x1fU +#define V_RAOFF(x) ((x) << S_RAOFF) +#define G_RAOFF(x) (((x) >> S_RAOFF) & M_RAOFF) + #define A_MAC_PORT_RX_LINKA_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x321c #define S_RBOOFF 10 @@ -37092,6 +52764,12 @@ #define G_RBEOFF(x) (((x) >> S_RBEOFF) & M_RBEOFF) #define A_MAC_PORT_RX_LINKA_DFE_CONTROL 0x3220 + +#define S_T6_SPIFMT 8 +#define M_T6_SPIFMT 0xfU +#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT) +#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT) + #define A_MAC_PORT_RX_LINKA_DFE_SAMPLE_SNAPSHOT_1 0x3224 #define S_T5BYTE1 8 @@ -37129,7 +52807,30 @@ #define V_T5_RX_ASAMP(x) ((x) << S_T5_RX_ASAMP) #define G_T5_RX_ASAMP(x) (((x) >> S_T5_RX_ASAMP) & M_T5_RX_ASAMP) +#define S_REQWOV 15 +#define V_REQWOV(x) ((x) << S_REQWOV) +#define F_REQWOV V_REQWOV(1U) + +#define S_RASEL 11 +#define M_RASEL 0x7U +#define V_RASEL(x) ((x) << S_RASEL) +#define G_RASEL(x) (((x) >> S_RASEL) & M_RASEL) + #define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_1 0x322c + +#define S_T6_WRAPSEL 15 +#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL) +#define F_T6_WRAPSEL V_T6_WRAPSEL(1U) + +#define S_ACTL 14 +#define V_ACTL(x) ((x) << S_ACTL) +#define F_ACTL V_ACTL(1U) + +#define S_T6_PEAK 9 +#define M_T6_PEAK 0x1fU +#define V_T6_PEAK(x) ((x) << S_T6_PEAK) +#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK) + #define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_2 0x3230 #define S_T5SHORTV 10 @@ -37141,6 +52842,37 @@ #define V_T5VGAIN(x) ((x) << S_T5VGAIN) #define G_T5VGAIN(x) (((x) >> S_T5VGAIN) & M_T5VGAIN) +#define S_FVOFFSKP 15 +#define V_FVOFFSKP(x) ((x) << S_FVOFFSKP) +#define 
F_FVOFFSKP V_FVOFFSKP(1U) + +#define S_FGAINCHK 14 +#define V_FGAINCHK(x) ((x) << S_FGAINCHK) +#define F_FGAINCHK V_FGAINCHK(1U) + +#define S_FH1ACAL 13 +#define V_FH1ACAL(x) ((x) << S_FH1ACAL) +#define F_FH1ACAL V_FH1ACAL(1U) + +#define S_FH1AFLTR 11 +#define M_FH1AFLTR 0x3U +#define V_FH1AFLTR(x) ((x) << S_FH1AFLTR) +#define G_FH1AFLTR(x) (((x) >> S_FH1AFLTR) & M_FH1AFLTR) + +#define S_WGAIN 8 +#define M_WGAIN 0x3U +#define V_WGAIN(x) ((x) << S_WGAIN) +#define G_WGAIN(x) (((x) >> S_WGAIN) & M_WGAIN) + +#define S_GAIN_STAT 7 +#define V_GAIN_STAT(x) ((x) << S_GAIN_STAT) +#define F_GAIN_STAT V_GAIN_STAT(1U) + +#define S_T6_T5VGAIN 0 +#define M_T6_T5VGAIN 0x7fU +#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN) +#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN) + #define A_MAC_PORT_RX_LINKA_RECEIVER_VGA_CONTROL_3 0x3234 #define A_MAC_PORT_RX_LINKA_RECEIVER_DQCC_CONTROL_1 0x3238 @@ -37159,6 +52891,34 @@ #define V_DUTYI(x) ((x) << S_DUTYI) #define G_DUTYI(x) (((x) >> S_DUTYI) & M_DUTYI) +#define A_MAC_PORT_RX_LINKA_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3238 + +#define S_PMCFG 6 +#define M_PMCFG 0x3U +#define V_PMCFG(x) ((x) << S_PMCFG) +#define G_PMCFG(x) (((x) >> S_PMCFG) & M_PMCFG) + +#define S_PMOFFTIME 0 +#define M_PMOFFTIME 0x3fU +#define V_PMOFFTIME(x) ((x) << S_PMOFFTIME) +#define G_PMOFFTIME(x) (((x) >> S_PMOFFTIME) & M_PMOFFTIME) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_IQAMP_CONTROL_1 0x323c + +#define S_SELI 9 +#define V_SELI(x) ((x) << S_SELI) +#define F_SELI V_SELI(1U) + +#define S_SERVREF 5 +#define M_SERVREF 0x7U +#define V_SERVREF(x) ((x) << S_SERVREF) +#define G_SERVREF(x) (((x) >> S_SERVREF) & M_SERVREF) + +#define S_IQAMP 0 +#define M_IQAMP 0x1fU +#define V_IQAMP(x) ((x) << S_IQAMP) +#define G_IQAMP(x) (((x) >> S_IQAMP) & M_IQAMP) + #define A_MAC_PORT_RX_LINKA_RECEIVER_DQCC_CONTROL_3 0x3240 #define S_DTHR 8 @@ -37171,8 +52931,41 @@ #define V_SNUL(x) ((x) << S_SNUL) #define G_SNUL(x) (((x) >> S_SNUL) & M_SNUL) +#define 
A_MAC_PORT_RX_LINKA_RECEIVER_IQAMP_CONTROL_2 0x3240 +#define A_MAC_PORT_RX_LINKA_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3244 + +#define S_SAVEADAC 8 +#define V_SAVEADAC(x) ((x) << S_SAVEADAC) +#define F_SAVEADAC V_SAVEADAC(1U) + +#define S_LOAD2 7 +#define V_LOAD2(x) ((x) << S_LOAD2) +#define F_LOAD2 V_LOAD2(1U) + +#define S_LOAD1 6 +#define V_LOAD1(x) ((x) << S_LOAD1) +#define F_LOAD1 V_LOAD1(1U) + +#define S_WRTACC2 5 +#define V_WRTACC2(x) ((x) << S_WRTACC2) +#define F_WRTACC2 V_WRTACC2(1U) + +#define S_WRTACC1 4 +#define V_WRTACC1(x) ((x) << S_WRTACC1) +#define F_WRTACC1 V_WRTACC1(1U) + +#define S_SELAPAN 3 +#define V_SELAPAN(x) ((x) << S_SELAPAN) +#define F_SELAPAN V_SELAPAN(1U) + +#define S_DASEL 0 +#define M_DASEL 0x7U +#define V_DASEL(x) ((x) << S_DASEL) +#define G_DASEL(x) (((x) >> S_DASEL) & M_DASEL) + #define A_MAC_PORT_RX_LINKA_RECEIVER_DACAP_AND_DACAN 0x3248 #define A_MAC_PORT_RX_LINKA_RECEIVER_DACA_MIN_AND_DACAZ 0x324c +#define A_MAC_PORT_RX_LINKA_RECEIVER_DACA_MIN 0x324c #define A_MAC_PORT_RX_LINKA_RECEIVER_ADAC_CONTROL 0x3250 #define S_ADSN_READWRITE 8 @@ -37183,6 +52976,61 @@ #define V_ADSN_READONLY(x) ((x) << S_ADSN_READONLY) #define F_ADSN_READONLY V_ADSN_READONLY(1U) +#define S_ADAC2 8 +#define M_ADAC2 0xffU +#define V_ADAC2(x) ((x) << S_ADAC2) +#define G_ADAC2(x) (((x) >> S_ADAC2) & M_ADAC2) + +#define S_ADAC1 0 +#define M_ADAC1 0xffU +#define V_ADAC1(x) ((x) << S_ADAC1) +#define G_ADAC1(x) (((x) >> S_ADAC1) & M_ADAC1) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_AC_COUPLING_CONTROL 0x3254 + +#define S_FACCPLDYN 13 +#define V_FACCPLDYN(x) ((x) << S_FACCPLDYN) +#define F_FACCPLDYN V_FACCPLDYN(1U) + +#define S_ACCPLGAIN 10 +#define M_ACCPLGAIN 0x7U +#define V_ACCPLGAIN(x) ((x) << S_ACCPLGAIN) +#define G_ACCPLGAIN(x) (((x) >> S_ACCPLGAIN) & M_ACCPLGAIN) + +#define S_ACCPLREF 8 +#define M_ACCPLREF 0x3U +#define V_ACCPLREF(x) ((x) << S_ACCPLREF) +#define G_ACCPLREF(x) (((x) >> S_ACCPLREF) & M_ACCPLREF) + +#define S_ACCPLSTEP 6 +#define M_ACCPLSTEP 0x3U 
+#define V_ACCPLSTEP(x) ((x) << S_ACCPLSTEP) +#define G_ACCPLSTEP(x) (((x) >> S_ACCPLSTEP) & M_ACCPLSTEP) + +#define S_ACCPLASTEP 1 +#define M_ACCPLASTEP 0x1fU +#define V_ACCPLASTEP(x) ((x) << S_ACCPLASTEP) +#define G_ACCPLASTEP(x) (((x) >> S_ACCPLASTEP) & M_ACCPLASTEP) + +#define S_FACCPL 0 +#define V_FACCPL(x) ((x) << S_FACCPL) +#define F_FACCPL V_FACCPL(1U) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_AC_COUPLING_VALUE 0x3258 + +#define S_ACCPLMEANS 15 +#define V_ACCPLMEANS(x) ((x) << S_ACCPLMEANS) +#define F_ACCPLMEANS V_ACCPLMEANS(1U) + +#define S_CDROVREN 8 +#define V_CDROVREN(x) ((x) << S_CDROVREN) +#define F_CDROVREN V_CDROVREN(1U) + +#define S_ACCPLBIAS 0 +#define M_ACCPLBIAS 0xffU +#define V_ACCPLBIAS(x) ((x) << S_ACCPLBIAS) +#define G_ACCPLBIAS(x) (((x) >> S_ACCPLBIAS) & M_ACCPLBIAS) + #define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x325c #define S_H1O2 8 @@ -37195,6 +53043,13 @@ #define V_H1E2(x) ((x) << S_H1E2) #define G_H1E2(x) (((x) >> S_H1E2) & M_H1E2) +#define A_MAC_PORT_RX_LINKA_DFE_H1H2H3_LOCAL_OFFSET 0x325c + +#define S_H123CH 0 +#define M_H123CH 0x3fU +#define V_H123CH(x) ((x) << S_H123CH) +#define G_H123CH(x) (((x) >> S_H123CH) & M_H123CH) + #define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3260 #define S_H1O3 8 @@ -37207,6 +53062,18 @@ #define V_H1E3(x) ((x) << S_H1E3) #define G_H1E3(x) (((x) >> S_H1E3) & M_H1E3) +#define A_MAC_PORT_RX_LINKA_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3260 + +#define S_H1OX 8 +#define M_H1OX 0x3fU +#define V_H1OX(x) ((x) << S_H1OX) +#define G_H1OX(x) (((x) >> S_H1OX) & M_H1OX) + +#define S_H1EX 0 +#define M_H1EX 0x3fU +#define V_H1EX(x) ((x) << S_H1EX) +#define G_H1EX(x) (((x) >> S_H1EX) & M_H1EX) + #define A_MAC_PORT_RX_LINKA_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3264 #define S_H1O4 8 @@ -37219,13 +53086,107 @@ #define V_H1E4(x) ((x) << S_H1E4) #define G_H1E4(x) (((x) >> S_H1E4) & M_H1E4) +#define A_MAC_PORT_RX_LINKA_PEAKED_INTEGRATOR 0x3264 + +#define S_PILOCK 10 +#define V_PILOCK(x) ((x) << S_PILOCK) 
+#define F_PILOCK V_PILOCK(1U) + +#define S_UNPKPKA 2 +#define M_UNPKPKA 0x3fU +#define V_UNPKPKA(x) ((x) << S_UNPKPKA) +#define G_UNPKPKA(x) (((x) >> S_UNPKPKA) & M_UNPKPKA) + +#define S_UNPKVGA 0 +#define M_UNPKVGA 0x3U +#define V_UNPKVGA(x) ((x) << S_UNPKVGA) +#define G_UNPKVGA(x) (((x) >> S_UNPKVGA) & M_UNPKVGA) + +#define A_MAC_PORT_RX_LINKA_CDR_ANALOG_SWITCH 0x3268 + +#define S_OVRAC 15 +#define V_OVRAC(x) ((x) << S_OVRAC) +#define F_OVRAC V_OVRAC(1U) + +#define S_OVRPK 14 +#define V_OVRPK(x) ((x) << S_OVRPK) +#define F_OVRPK V_OVRPK(1U) + +#define S_OVRTAILS 12 +#define M_OVRTAILS 0x3U +#define V_OVRTAILS(x) ((x) << S_OVRTAILS) +#define G_OVRTAILS(x) (((x) >> S_OVRTAILS) & M_OVRTAILS) + +#define S_OVRTAILV 9 +#define M_OVRTAILV 0x7U +#define V_OVRTAILV(x) ((x) << S_OVRTAILV) +#define G_OVRTAILV(x) (((x) >> S_OVRTAILV) & M_OVRTAILV) + +#define S_OVRCAP 8 +#define V_OVRCAP(x) ((x) << S_OVRCAP) +#define F_OVRCAP V_OVRCAP(1U) + +#define S_OVRDCDPRE 7 +#define V_OVRDCDPRE(x) ((x) << S_OVRDCDPRE) +#define F_OVRDCDPRE V_OVRDCDPRE(1U) + +#define S_OVRDCDPST 6 +#define V_OVRDCDPST(x) ((x) << S_OVRDCDPST) +#define F_OVRDCDPST V_OVRDCDPST(1U) + +#define S_DCVSCTMODE 2 +#define V_DCVSCTMODE(x) ((x) << S_DCVSCTMODE) +#define F_DCVSCTMODE V_DCVSCTMODE(1U) + +#define S_CDRANLGSW 0 +#define M_CDRANLGSW 0x3U +#define V_CDRANLGSW(x) ((x) << S_CDRANLGSW) +#define G_CDRANLGSW(x) (((x) >> S_CDRANLGSW) & M_CDRANLGSW) + +#define A_MAC_PORT_RX_LINKA_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x326c + +#define S_PFLAG 5 +#define M_PFLAG 0x3U +#define V_PFLAG(x) ((x) << S_PFLAG) +#define G_PFLAG(x) (((x) >> S_PFLAG) & M_PFLAG) + #define A_MAC_PORT_RX_LINKA_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3270 #define S_DPCMD 14 #define V_DPCMD(x) ((x) << S_DPCMD) #define F_DPCMD V_DPCMD(1U) +#define S_DACCLIP 15 +#define V_DACCLIP(x) ((x) << S_DACCLIP) +#define F_DACCLIP V_DACCLIP(1U) + +#define S_DPCFRZ 14 +#define V_DPCFRZ(x) ((x) << S_DPCFRZ) +#define F_DPCFRZ 
V_DPCFRZ(1U) + +#define S_DPCLKNQ 11 +#define V_DPCLKNQ(x) ((x) << S_DPCLKNQ) +#define F_DPCLKNQ V_DPCLKNQ(1U) + +#define S_DPCWDFE 10 +#define V_DPCWDFE(x) ((x) << S_DPCWDFE) +#define F_DPCWDFE V_DPCWDFE(1U) + +#define S_DPCWPK 9 +#define V_DPCWPK(x) ((x) << S_DPCWPK) +#define F_DPCWPK V_DPCWPK(1U) + #define A_MAC_PORT_RX_LINKA_DYNAMIC_DATA_CENTERING_DDC 0x3274 + +#define S_VIEWSCAN 4 +#define V_VIEWSCAN(x) ((x) << S_VIEWSCAN) +#define F_VIEWSCAN V_VIEWSCAN(1U) + +#define S_T6_ODEC 0 +#define M_T6_ODEC 0xfU +#define V_T6_ODEC(x) ((x) << S_T6_ODEC) +#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC) + #define A_MAC_PORT_RX_LINKA_RECEIVER_INTERNAL_STATUS 0x3278 #define S_T5BER6VAL 15 @@ -37284,6 +53245,18 @@ #define V_T5OCCMP(x) ((x) << S_T5OCCMP) #define F_T5OCCMP V_T5OCCMP(1U) +#define S_RX_LINKA_ACCCMP_RIS 11 +#define V_RX_LINKA_ACCCMP_RIS(x) ((x) << S_RX_LINKA_ACCCMP_RIS) +#define F_RX_LINKA_ACCCMP_RIS V_RX_LINKA_ACCCMP_RIS(1U) + +#define S_DCCCMP 10 +#define V_DCCCMP(x) ((x) << S_DCCCMP) +#define F_DCCCMP V_DCCCMP(1U) + +#define S_T5IQCMP 1 +#define V_T5IQCMP(x) ((x) << S_T5IQCMP) +#define F_T5IQCMP V_T5IQCMP(1U) + #define A_MAC_PORT_RX_LINKA_DFE_FUNCTION_CONTROL_1 0x327c #define S_FLOFF 1 @@ -37332,6 +53305,42 @@ #define V_FDQCC(x) ((x) << S_FDQCC) #define F_FDQCC V_FDQCC(1U) +#define S_FDCCAL 14 +#define V_FDCCAL(x) ((x) << S_FDCCAL) +#define F_FDCCAL V_FDCCAL(1U) + +#define S_FROTCAL 13 +#define V_FROTCAL(x) ((x) << S_FROTCAL) +#define F_FROTCAL V_FROTCAL(1U) + +#define S_FIQAMP 12 +#define V_FIQAMP(x) ((x) << S_FIQAMP) +#define F_FIQAMP V_FIQAMP(1U) + +#define S_FRPTCALF 11 +#define V_FRPTCALF(x) ((x) << S_FRPTCALF) +#define F_FRPTCALF V_FRPTCALF(1U) + +#define S_FINTCALGS 10 +#define V_FINTCALGS(x) ((x) << S_FINTCALGS) +#define F_FINTCALGS V_FINTCALGS(1U) + +#define S_FDCC 9 +#define V_FDCC(x) ((x) << S_FDCC) +#define F_FDCC V_FDCC(1U) + +#define S_FDCD 7 +#define V_FDCD(x) ((x) << S_FDCD) +#define F_FDCD V_FDCD(1U) + +#define S_FINTRCALDYN 1 +#define 
V_FINTRCALDYN(x) ((x) << S_FINTRCALDYN) +#define F_FINTRCALDYN V_FINTRCALDYN(1U) + +#define S_FQCC 0 +#define V_FQCC(x) ((x) << S_FQCC) +#define F_FQCC V_FQCC(1U) + #define A_MAC_PORT_RX_LINKA_DFE_OFFSET_EVN1_EVN2 0x3284 #define S_LOFE2S_READWRITE 16 @@ -37361,6 +53370,31 @@ #define V_LOFE1(x) ((x) << S_LOFE1) #define G_LOFE1(x) (((x) >> S_LOFE1) & M_LOFE1) +#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_CHANNEL 0x3284 + +#define S_QCCIND 13 +#define V_QCCIND(x) ((x) << S_QCCIND) +#define F_QCCIND V_QCCIND(1U) + +#define S_DCDIND 10 +#define M_DCDIND 0x7U +#define V_DCDIND(x) ((x) << S_DCDIND) +#define G_DCDIND(x) (((x) >> S_DCDIND) & M_DCDIND) + +#define S_DCCIND 8 +#define M_DCCIND 0x3U +#define V_DCCIND(x) ((x) << S_DCCIND) +#define G_DCCIND(x) (((x) >> S_DCCIND) & M_DCCIND) + +#define S_CFSEL 5 +#define V_CFSEL(x) ((x) << S_CFSEL) +#define F_CFSEL V_CFSEL(1U) + +#define S_LOFCH 0 +#define M_LOFCH 0x1fU +#define V_LOFCH(x) ((x) << S_LOFCH) +#define G_LOFCH(x) (((x) >> S_LOFCH) & M_LOFCH) + #define A_MAC_PORT_RX_LINKA_DFE_OFFSET_ODD1_ODD2 0x3288 #define S_LOFO2S_READWRITE 15 @@ -37389,6 +53423,18 @@ #define V_LOFO1(x) ((x) << S_LOFO1) #define G_LOFO1(x) (((x) >> S_LOFO1) & M_LOFO1) +#define A_MAC_PORT_RX_LINKA_DFE_OFFSET_VALUE 0x3288 + +#define S_LOFU 8 +#define M_LOFU 0x7fU +#define V_LOFU(x) ((x) << S_LOFU) +#define G_LOFU(x) (((x) >> S_LOFU) & M_LOFU) + +#define S_LOFL 0 +#define M_LOFL 0x7fU +#define V_LOFL(x) ((x) << S_LOFL) +#define G_LOFL(x) (((x) >> S_LOFL) & M_LOFL) + #define A_MAC_PORT_RX_LINKA_DFE_OFFSET_EVN3_EVN4 0x328c #define S_LOFE4S_READWRITE 15 @@ -37417,6 +53463,42 @@ #define V_LOFE3(x) ((x) << S_LOFE3) #define G_LOFE3(x) (((x) >> S_LOFE3) & M_LOFE3) +#define A_MAC_PORT_RX_LINKA_H_COEFFICIENBT_BIST 0x328c + +#define S_HBISTMAN 12 +#define V_HBISTMAN(x) ((x) << S_HBISTMAN) +#define F_HBISTMAN V_HBISTMAN(1U) + +#define S_HBISTRES 11 +#define V_HBISTRES(x) ((x) << S_HBISTRES) +#define F_HBISTRES V_HBISTRES(1U) + +#define S_HBISTSP 8 +#define M_HBISTSP 0x7U 
+#define V_HBISTSP(x) ((x) << S_HBISTSP) +#define G_HBISTSP(x) (((x) >> S_HBISTSP) & M_HBISTSP) + +#define S_HBISTEN 7 +#define V_HBISTEN(x) ((x) << S_HBISTEN) +#define F_HBISTEN V_HBISTEN(1U) + +#define S_HBISTRST 6 +#define V_HBISTRST(x) ((x) << S_HBISTRST) +#define F_HBISTRST V_HBISTRST(1U) + +#define S_HCOMP 5 +#define V_HCOMP(x) ((x) << S_HCOMP) +#define F_HCOMP V_HCOMP(1U) + +#define S_HPASS 4 +#define V_HPASS(x) ((x) << S_HPASS) +#define F_HPASS V_HPASS(1U) + +#define S_HSEL 0 +#define M_HSEL 0xfU +#define V_HSEL(x) ((x) << S_HSEL) +#define G_HSEL(x) (((x) >> S_HSEL) & M_HSEL) + #define A_MAC_PORT_RX_LINKA_DFE_OFFSET_ODD3_ODD4 0x3290 #define S_LOFO4S_READWRITE 15 @@ -37445,6 +53527,30 @@ #define V_LOFO3(x) ((x) << S_LOFO3) #define G_LOFO3(x) (((x) >> S_LOFO3) & M_LOFO3) +#define A_MAC_PORT_RX_LINKA_AC_CAPACITOR_BIST 0x3290 + +#define S_RX_LINKA_ACCCMP_BIST 13 +#define V_RX_LINKA_ACCCMP_BIST(x) ((x) << S_RX_LINKA_ACCCMP_BIST) +#define F_RX_LINKA_ACCCMP_BIST V_RX_LINKA_ACCCMP_BIST(1U) + +#define S_ACCEN 12 +#define V_ACCEN(x) ((x) << S_ACCEN) +#define F_ACCEN V_ACCEN(1U) + +#define S_ACCRST 11 +#define V_ACCRST(x) ((x) << S_ACCRST) +#define F_ACCRST V_ACCRST(1U) + +#define S_ACCIND 8 +#define M_ACCIND 0x7U +#define V_ACCIND(x) ((x) << S_ACCIND) +#define G_ACCIND(x) (((x) >> S_ACCIND) & M_ACCIND) + +#define S_ACCRD 0 +#define M_ACCRD 0xffU +#define V_ACCRD(x) ((x) << S_ACCRD) +#define G_ACCRD(x) (((x) >> S_ACCRD) & M_ACCRD) + #define A_MAC_PORT_RX_LINKA_DFE_E0_AND_E1_OFFSET 0x3294 #define S_T5E1SN_READWRITE 15 @@ -37488,6 +53594,42 @@ #define V_T5LFSEL(x) ((x) << S_T5LFSEL) #define G_T5LFSEL(x) (((x) >> S_T5LFSEL) & M_T5LFSEL) +#define A_MAC_PORT_RX_LINKA_RECEIVER_LOFF_CONTROL_REGISTER 0x3298 + +#define S_LFREG 15 +#define V_LFREG(x) ((x) << S_LFREG) +#define F_LFREG V_LFREG(1U) + +#define S_LFRC 14 +#define V_LFRC(x) ((x) << S_LFRC) +#define F_LFRC V_LFRC(1U) + +#define S_LGIDLE 13 +#define V_LGIDLE(x) ((x) << S_LGIDLE) +#define F_LGIDLE V_LGIDLE(1U) + 
+#define S_LFTGT 8 +#define M_LFTGT 0x1fU +#define V_LFTGT(x) ((x) << S_LFTGT) +#define G_LFTGT(x) (((x) >> S_LFTGT) & M_LFTGT) + +#define S_LGTGT 7 +#define V_LGTGT(x) ((x) << S_LGTGT) +#define F_LGTGT V_LGTGT(1U) + +#define S_LRDY 6 +#define V_LRDY(x) ((x) << S_LRDY) +#define F_LRDY V_LRDY(1U) + +#define S_LIDLE 5 +#define V_LIDLE(x) ((x) << S_LIDLE) +#define F_LIDLE V_LIDLE(1U) + +#define S_LCURR 0 +#define M_LCURR 0x1fU +#define V_LCURR(x) ((x) << S_LCURR) +#define G_LCURR(x) (((x) >> S_LCURR) & M_LCURR) + #define A_MAC_PORT_RX_LINKA_RECEIVER_SIGDET_CONTROL 0x329c #define S_OFFSN_READWRITE 14 @@ -37507,6 +53649,11 @@ #define V_SDACDC(x) ((x) << S_SDACDC) #define F_SDACDC V_SDACDC(1U) +#define S_OFFSN 13 +#define M_OFFSN 0x3U +#define V_OFFSN(x) ((x) << S_OFFSN) +#define G_OFFSN(x) (((x) >> S_OFFSN) & M_OFFSN) + #define A_MAC_PORT_RX_LINKA_RECEIVER_ANALOG_CONTROL_SWITCH 0x32a0 #define S_T5_RX_SETHDIS 7 @@ -37538,6 +53685,43 @@ #define V_T5_RX_VTERM(x) ((x) << S_T5_RX_VTERM) #define G_T5_RX_VTERM(x) (((x) >> S_T5_RX_VTERM) & M_T5_RX_VTERM) +#define S_RX_OVRSUMPD 15 +#define V_RX_OVRSUMPD(x) ((x) << S_RX_OVRSUMPD) +#define F_RX_OVRSUMPD V_RX_OVRSUMPD(1U) + +#define S_RX_OVRKBPD 14 +#define V_RX_OVRKBPD(x) ((x) << S_RX_OVRKBPD) +#define F_RX_OVRKBPD V_RX_OVRKBPD(1U) + +#define S_RX_OVRDIVPD 13 +#define V_RX_OVRDIVPD(x) ((x) << S_RX_OVRDIVPD) +#define F_RX_OVRDIVPD V_RX_OVRDIVPD(1U) + +#define S_RX_OFFVGADIS 12 +#define V_RX_OFFVGADIS(x) ((x) << S_RX_OFFVGADIS) +#define F_RX_OFFVGADIS V_RX_OFFVGADIS(1U) + +#define S_RX_OFFACDIS 11 +#define V_RX_OFFACDIS(x) ((x) << S_RX_OFFACDIS) +#define F_RX_OFFACDIS V_RX_OFFACDIS(1U) + +#define S_RX_VTERM 10 +#define V_RX_VTERM(x) ((x) << S_RX_VTERM) +#define F_RX_VTERM V_RX_VTERM(1U) + +#define S_RX_DISSPY2D 8 +#define V_RX_DISSPY2D(x) ((x) << S_RX_DISSPY2D) +#define F_RX_DISSPY2D V_RX_DISSPY2D(1U) + +#define S_RX_OBSOVEN 7 +#define V_RX_OBSOVEN(x) ((x) << S_RX_OBSOVEN) +#define F_RX_OBSOVEN V_RX_OBSOVEN(1U) + +#define 
S_RX_LINKANLGSW 0 +#define M_RX_LINKANLGSW 0x7fU +#define V_RX_LINKANLGSW(x) ((x) << S_RX_LINKANLGSW) +#define G_RX_LINKANLGSW(x) (((x) >> S_RX_LINKANLGSW) & M_RX_LINKANLGSW) + #define A_MAC_PORT_RX_LINKA_INTEGRATOR_DAC_OFFSET 0x32a4 #define S_ISTRIMS 14 @@ -37563,6 +53747,21 @@ #define V_INTDAC(x) ((x) << S_INTDAC) #define G_INTDAC(x) (((x) >> S_INTDAC) & M_INTDAC) +#define S_INTDACEGS 13 +#define M_INTDACEGS 0x7U +#define V_INTDACEGS(x) ((x) << S_INTDACEGS) +#define G_INTDACEGS(x) (((x) >> S_INTDACEGS) & M_INTDACEGS) + +#define S_INTDACE 8 +#define M_INTDACE 0x1fU +#define V_INTDACE(x) ((x) << S_INTDACE) +#define G_INTDACE(x) (((x) >> S_INTDACE) & M_INTDACE) + +#define S_INTDACGS 6 +#define M_INTDACGS 0x3U +#define V_INTDACGS(x) ((x) << S_INTDACGS) +#define G_INTDACGS(x) (((x) >> S_INTDACGS) & M_INTDACGS) + #define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_CONTROL 0x32a8 #define S_MINWDTH 5 @@ -37599,6 +53798,29 @@ #define V_T5EMEN(x) ((x) << S_T5EMEN) #define F_T5EMEN V_T5EMEN(1U) +#define S_SMQM 13 +#define M_SMQM 0x7U +#define V_SMQM(x) ((x) << S_SMQM) +#define G_SMQM(x) (((x) >> S_SMQM) & M_SMQM) + +#define S_SMQ 5 +#define M_SMQ 0xffU +#define V_SMQ(x) ((x) << S_SMQ) +#define G_SMQ(x) (((x) >> S_SMQ) & M_SMQ) + +#define S_T6_EMMD 3 +#define M_T6_EMMD 0x3U +#define V_T6_EMMD(x) ((x) << S_T6_EMMD) +#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD) + +#define S_T6_EMBRDY 2 +#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY) +#define F_T6_EMBRDY V_T6_EMBRDY(1U) + +#define S_T6_EMBUMP 1 +#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP) +#define F_T6_EMBUMP V_T6_EMBUMP(1U) + #define A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_ERROR_COUNT 0x32b0 #define S_EMF8 15 @@ -37622,6 +53844,14 @@ #define V_EMCEN(x) ((x) << S_EMCEN) #define F_EMCEN V_EMCEN(1U) +#define S_EMSF 13 +#define V_EMSF(x) ((x) << S_EMSF) +#define F_EMSF V_EMSF(1U) + +#define S_EMDATA59 12 +#define V_EMDATA59(x) ((x) << S_EMDATA59) +#define F_EMDATA59 V_EMDATA59(1U) + #define 
A_MAC_PORT_RX_LINKA_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x32b4 #define S_SM2RDY 15 @@ -37644,6 +53874,72 @@ #define V_SM0LEN(x) ((x) << S_SM0LEN) #define G_SM0LEN(x) (((x) >> S_SM0LEN) & M_SM0LEN) +#define A_MAC_PORT_RX_LINKA_DFE_FUNCTION_CONTROL_3 0x32bc + +#define S_FTIMEOUT 15 +#define V_FTIMEOUT(x) ((x) << S_FTIMEOUT) +#define F_FTIMEOUT V_FTIMEOUT(1U) + +#define S_FROTCAL4 14 +#define V_FROTCAL4(x) ((x) << S_FROTCAL4) +#define F_FROTCAL4 V_FROTCAL4(1U) + +#define S_FDCD2 13 +#define V_FDCD2(x) ((x) << S_FDCD2) +#define F_FDCD2 V_FDCD2(1U) + +#define S_FPRBSPOLTOG 12 +#define V_FPRBSPOLTOG(x) ((x) << S_FPRBSPOLTOG) +#define F_FPRBSPOLTOG V_FPRBSPOLTOG(1U) + +#define S_FPRBSOFF2 11 +#define V_FPRBSOFF2(x) ((x) << S_FPRBSOFF2) +#define F_FPRBSOFF2 V_FPRBSOFF2(1U) + +#define S_FDDCAL2 10 +#define V_FDDCAL2(x) ((x) << S_FDDCAL2) +#define F_FDDCAL2 V_FDDCAL2(1U) + +#define S_FDDCFLTR 9 +#define V_FDDCFLTR(x) ((x) << S_FDDCFLTR) +#define F_FDDCFLTR V_FDDCFLTR(1U) + +#define S_FDAC6 8 +#define V_FDAC6(x) ((x) << S_FDAC6) +#define F_FDAC6 V_FDAC6(1U) + +#define S_FDDC5 7 +#define V_FDDC5(x) ((x) << S_FDDC5) +#define F_FDDC5 V_FDDC5(1U) + +#define S_FDDC3456 6 +#define V_FDDC3456(x) ((x) << S_FDDC3456) +#define F_FDDC3456 V_FDDC3456(1U) + +#define S_FSPY2DATA 5 +#define V_FSPY2DATA(x) ((x) << S_FSPY2DATA) +#define F_FSPY2DATA V_FSPY2DATA(1U) + +#define S_FPHSLOCK 4 +#define V_FPHSLOCK(x) ((x) << S_FPHSLOCK) +#define F_FPHSLOCK V_FPHSLOCK(1U) + +#define S_FCLKALGN 3 +#define V_FCLKALGN(x) ((x) << S_FCLKALGN) +#define F_FCLKALGN V_FCLKALGN(1U) + +#define S_FCLKALDYN 2 +#define V_FCLKALDYN(x) ((x) << S_FCLKALDYN) +#define F_FCLKALDYN V_FCLKALDYN(1U) + +#define S_FDFE 1 +#define V_FDFE(x) ((x) << S_FDFE) +#define F_FDFE V_FDFE(1U) + +#define S_FPRBSOFF 0 +#define V_FPRBSOFF(x) ((x) << S_FPRBSOFF) +#define F_FPRBSOFF V_FPRBSOFF(1U) + #define A_MAC_PORT_RX_LINKA_DFE_TAP_ENABLE 0x32c0 #define S_H_EN 1 @@ -37651,7 +53947,21 @@ #define V_H_EN(x) ((x) << S_H_EN) #define G_H_EN(x) 
(((x) >> S_H_EN) & M_H_EN) +#define A_MAC_PORT_RX_LINKA_DFE_TAP_CONTROL 0x32c0 + +#define S_RX_LINKA_INDEX_DFE_TC 0 +#define M_RX_LINKA_INDEX_DFE_TC 0xfU +#define V_RX_LINKA_INDEX_DFE_TC(x) ((x) << S_RX_LINKA_INDEX_DFE_TC) +#define G_RX_LINKA_INDEX_DFE_TC(x) (((x) >> S_RX_LINKA_INDEX_DFE_TC) & M_RX_LINKA_INDEX_DFE_TC) + #define A_MAC_PORT_RX_LINKA_DFE_H1 0x32c4 +#define A_MAC_PORT_RX_LINKA_DFE_TAP 0x32c4 + +#define S_RX_LINKA_INDEX_DFE_TAP 0 +#define M_RX_LINKA_INDEX_DFE_TAP 0xfU +#define V_RX_LINKA_INDEX_DFE_TAP(x) ((x) << S_RX_LINKA_INDEX_DFE_TAP) +#define G_RX_LINKA_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKA_INDEX_DFE_TAP) & M_RX_LINKA_INDEX_DFE_TAP) + #define A_MAC_PORT_RX_LINKA_DFE_H2 0x32c8 #define S_H2OSN_READWRITE 14 @@ -37878,6 +54188,154 @@ #define V_H12MAG(x) ((x) << S_H12MAG) #define G_H12MAG(x) (((x) >> S_H12MAG) & M_H12MAG) +#define A_MAC_PORT_RX_LINKA_RECEIVER_INTERNAL_STATUS_2 0x32e4 + +#define S_STNDBYSTAT 15 +#define V_STNDBYSTAT(x) ((x) << S_STNDBYSTAT) +#define F_STNDBYSTAT V_STNDBYSTAT(1U) + +#define S_CALSDONE 14 +#define V_CALSDONE(x) ((x) << S_CALSDONE) +#define F_CALSDONE V_CALSDONE(1U) + +#define S_ACISRCCMP 5 +#define V_ACISRCCMP(x) ((x) << S_ACISRCCMP) +#define F_ACISRCCMP V_ACISRCCMP(1U) + +#define S_PRBSOFFCMP 4 +#define V_PRBSOFFCMP(x) ((x) << S_PRBSOFFCMP) +#define F_PRBSOFFCMP V_PRBSOFFCMP(1U) + +#define S_CLKALGNCMP 3 +#define V_CLKALGNCMP(x) ((x) << S_CLKALGNCMP) +#define F_CLKALGNCMP V_CLKALGNCMP(1U) + +#define S_ROTFCMP 2 +#define V_ROTFCMP(x) ((x) << S_ROTFCMP) +#define F_ROTFCMP V_ROTFCMP(1U) + +#define S_DCDCMP 1 +#define V_DCDCMP(x) ((x) << S_DCDCMP) +#define F_DCDCMP V_DCDCMP(1U) + +#define S_QCCCMP 0 +#define V_QCCCMP(x) ((x) << S_QCCCMP) +#define F_QCCCMP V_QCCCMP(1U) + +#define A_MAC_PORT_RX_LINKA_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x32e8 + +#define S_FCSADJ 6 +#define V_FCSADJ(x) ((x) << S_FCSADJ) +#define F_FCSADJ V_FCSADJ(1U) + +#define S_CSIND 3 +#define M_CSIND 0x3U +#define V_CSIND(x) ((x) << S_CSIND) +#define G_CSIND(x) 
(((x) >> S_CSIND) & M_CSIND) + +#define S_CSVAL 0 +#define M_CSVAL 0x7U +#define V_CSVAL(x) ((x) << S_CSVAL) +#define G_CSVAL(x) (((x) >> S_CSVAL) & M_CSVAL) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_DCD_CONTROL 0x32ec + +#define S_DCDTMDOUT 15 +#define V_DCDTMDOUT(x) ((x) << S_DCDTMDOUT) +#define F_DCDTMDOUT V_DCDTMDOUT(1U) + +#define S_DCDTOEN 14 +#define V_DCDTOEN(x) ((x) << S_DCDTOEN) +#define F_DCDTOEN V_DCDTOEN(1U) + +#define S_DCDLOCK 13 +#define V_DCDLOCK(x) ((x) << S_DCDLOCK) +#define F_DCDLOCK V_DCDLOCK(1U) + +#define S_DCDSTEP 11 +#define M_DCDSTEP 0x3U +#define V_DCDSTEP(x) ((x) << S_DCDSTEP) +#define G_DCDSTEP(x) (((x) >> S_DCDSTEP) & M_DCDSTEP) + +#define S_DCDALTWPDIS 10 +#define V_DCDALTWPDIS(x) ((x) << S_DCDALTWPDIS) +#define F_DCDALTWPDIS V_DCDALTWPDIS(1U) + +#define S_DCDOVRDEN 9 +#define V_DCDOVRDEN(x) ((x) << S_DCDOVRDEN) +#define F_DCDOVRDEN V_DCDOVRDEN(1U) + +#define S_DCCAOVRDEN 8 +#define V_DCCAOVRDEN(x) ((x) << S_DCCAOVRDEN) +#define F_DCCAOVRDEN V_DCCAOVRDEN(1U) + +#define S_DCDSIGN 6 +#define M_DCDSIGN 0x3U +#define V_DCDSIGN(x) ((x) << S_DCDSIGN) +#define G_DCDSIGN(x) (((x) >> S_DCDSIGN) & M_DCDSIGN) + +#define S_DCDAMP 0 +#define M_DCDAMP 0x3fU +#define V_DCDAMP(x) ((x) << S_DCDAMP) +#define G_DCDAMP(x) (((x) >> S_DCDAMP) & M_DCDAMP) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_DCC_CONTROL 0x32f0 + +#define S_PRBSMODE 14 +#define M_PRBSMODE 0x3U +#define V_PRBSMODE(x) ((x) << S_PRBSMODE) +#define G_PRBSMODE(x) (((x) >> S_PRBSMODE) & M_PRBSMODE) + +#define S_RX_LINKA_DCCSTEP_RXCTL 10 +#define M_RX_LINKA_DCCSTEP_RXCTL 0x3U +#define V_RX_LINKA_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKA_DCCSTEP_RXCTL) +#define G_RX_LINKA_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKA_DCCSTEP_RXCTL) & M_RX_LINKA_DCCSTEP_RXCTL) + +#define S_DCCOVRDEN 9 +#define V_DCCOVRDEN(x) ((x) << S_DCCOVRDEN) +#define F_DCCOVRDEN V_DCCOVRDEN(1U) + +#define S_RX_LINKA_DCCLOCK_RXCTL 8 +#define V_RX_LINKA_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKA_DCCLOCK_RXCTL) +#define F_RX_LINKA_DCCLOCK_RXCTL 
V_RX_LINKA_DCCLOCK_RXCTL(1U) + +#define A_MAC_PORT_RX_LINKA_RECEIVER_QCC_CONTROL 0x32f4 + +#define S_DCCQCCMODE 15 +#define V_DCCQCCMODE(x) ((x) << S_DCCQCCMODE) +#define F_DCCQCCMODE V_DCCQCCMODE(1U) + +#define S_DCCQCCDYN 14 +#define V_DCCQCCDYN(x) ((x) << S_DCCQCCDYN) +#define F_DCCQCCDYN V_DCCQCCDYN(1U) + +#define S_DCCQCCHOLD 13 +#define V_DCCQCCHOLD(x) ((x) << S_DCCQCCHOLD) +#define F_DCCQCCHOLD V_DCCQCCHOLD(1U) + +#define S_QCCSTEP 10 +#define M_QCCSTEP 0x3U +#define V_QCCSTEP(x) ((x) << S_QCCSTEP) +#define G_QCCSTEP(x) (((x) >> S_QCCSTEP) & M_QCCSTEP) + +#define S_QCCOVRDEN 9 +#define V_QCCOVRDEN(x) ((x) << S_QCCOVRDEN) +#define F_QCCOVRDEN V_QCCOVRDEN(1U) + +#define S_QCCLOCK 8 +#define V_QCCLOCK(x) ((x) << S_QCCLOCK) +#define F_QCCLOCK V_QCCLOCK(1U) + +#define S_QCCSIGN 6 +#define M_QCCSIGN 0x3U +#define V_QCCSIGN(x) ((x) << S_QCCSIGN) +#define G_QCCSIGN(x) (((x) >> S_QCCSIGN) & M_QCCSIGN) + +#define S_QCDAMP 0 +#define M_QCDAMP 0x3fU +#define V_QCDAMP(x) ((x) << S_QCDAMP) +#define G_QCDAMP(x) (((x) >> S_QCDAMP) & M_QCDAMP) + #define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_2 0x32f8 #define S_DFEDACLSSD 6 @@ -37908,6 +54366,12 @@ #define V_ACJZNT(x) ((x) << S_ACJZNT) #define F_ACJZNT V_ACJZNT(1U) +#define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x32f8 + +#define S_TSTCMP 15 +#define V_TSTCMP(x) ((x) << S_TSTCMP) +#define F_TSTCMP V_TSTCMP(1U) + #define A_MAC_PORT_RX_LINKA_RECEIVER_MACRO_TEST_CONTROL_1 0x32fc #define S_PHSLOCK 10 @@ -37954,49 +54418,168 @@ #define V_MTHOLD(x) ((x) << S_MTHOLD) #define F_MTHOLD V_MTHOLD(1U) +#define S_CALMODEEDGE 14 +#define V_CALMODEEDGE(x) ((x) << S_CALMODEEDGE) +#define F_CALMODEEDGE V_CALMODEEDGE(1U) + +#define S_TESTCAP 13 +#define V_TESTCAP(x) ((x) << S_TESTCAP) +#define F_TESTCAP V_TESTCAP(1U) + +#define S_SNAPEN 12 +#define V_SNAPEN(x) ((x) << S_SNAPEN) +#define F_SNAPEN V_SNAPEN(1U) + +#define S_ASYNCDIR 11 +#define V_ASYNCDIR(x) ((x) << S_ASYNCDIR) +#define F_ASYNCDIR V_ASYNCDIR(1U) + 
#define A_MAC_PORT_RX_LINKB_RECEIVER_CONFIGURATION_MODE 0x3300 #define A_MAC_PORT_RX_LINKB_RECEIVER_TEST_CONTROL 0x3304 #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_CONTROL 0x3308 #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_OFFSET_CONTROL 0x330c + +#define S_T6_TMSCAL 8 +#define M_T6_TMSCAL 0x3U +#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL) +#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL) + +#define S_T6_APADJ 7 +#define V_T6_APADJ(x) ((x) << S_T6_APADJ) +#define F_T6_APADJ V_T6_APADJ(1U) + +#define S_T6_RSEL 6 +#define V_T6_RSEL(x) ((x) << S_T6_RSEL) +#define F_T6_RSEL V_T6_RSEL(1U) + +#define S_T6_PHOFFS 0 +#define M_T6_PHOFFS 0x3fU +#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS) +#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS) + #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_1 0x3310 #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_POSITION_2 0x3314 #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3318 #define A_MAC_PORT_RX_LINKB_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x331c #define A_MAC_PORT_RX_LINKB_DFE_CONTROL 0x3320 + +#define S_T6_SPIFMT 8 +#define M_T6_SPIFMT 0xfU +#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT) +#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT) + #define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_1 0x3324 #define A_MAC_PORT_RX_LINKB_DFE_SAMPLE_SNAPSHOT_2 0x3328 #define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_1 0x332c + +#define S_T6_WRAPSEL 15 +#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL) +#define F_T6_WRAPSEL V_T6_WRAPSEL(1U) + +#define S_T6_PEAK 9 +#define M_T6_PEAK 0x1fU +#define V_T6_PEAK(x) ((x) << S_T6_PEAK) +#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK) + #define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_2 0x3330 + +#define S_T6_T5VGAIN 0 +#define M_T6_T5VGAIN 0x7fU +#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN) +#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN) + #define A_MAC_PORT_RX_LINKB_RECEIVER_VGA_CONTROL_3 0x3334 #define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_1 0x3338 
+#define A_MAC_PORT_RX_LINKB_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3338 +#define A_MAC_PORT_RX_LINKB_RECEIVER_IQAMP_CONTROL_1 0x333c #define A_MAC_PORT_RX_LINKB_RECEIVER_DQCC_CONTROL_3 0x3340 +#define A_MAC_PORT_RX_LINKB_RECEIVER_IQAMP_CONTROL_2 0x3340 +#define A_MAC_PORT_RX_LINKB_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3344 #define A_MAC_PORT_RX_LINKB_RECEIVER_DACAP_AND_DACAN 0x3348 #define A_MAC_PORT_RX_LINKB_RECEIVER_DACA_MIN_AND_DACAZ 0x334c +#define A_MAC_PORT_RX_LINKB_RECEIVER_DACA_MIN 0x334c #define A_MAC_PORT_RX_LINKB_RECEIVER_ADAC_CONTROL 0x3350 +#define A_MAC_PORT_RX_LINKB_RECEIVER_AC_COUPLING_CONTROL 0x3354 +#define A_MAC_PORT_RX_LINKB_RECEIVER_AC_COUPLING_VALUE 0x3358 #define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x335c +#define A_MAC_PORT_RX_LINKB_DFE_H1H2H3_LOCAL_OFFSET 0x335c #define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3360 +#define A_MAC_PORT_RX_LINKB_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3360 #define A_MAC_PORT_RX_LINKB_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3364 +#define A_MAC_PORT_RX_LINKB_PEAKED_INTEGRATOR 0x3364 +#define A_MAC_PORT_RX_LINKB_CDR_ANALOG_SWITCH 0x3368 +#define A_MAC_PORT_RX_LINKB_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x336c #define A_MAC_PORT_RX_LINKB_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3370 #define A_MAC_PORT_RX_LINKB_DYNAMIC_DATA_CENTERING_DDC 0x3374 + +#define S_T6_ODEC 0 +#define M_T6_ODEC 0xfU +#define V_T6_ODEC(x) ((x) << S_T6_ODEC) +#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC) + #define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS 0x3378 + +#define S_RX_LINKB_ACCCMP_RIS 11 +#define V_RX_LINKB_ACCCMP_RIS(x) ((x) << S_RX_LINKB_ACCCMP_RIS) +#define F_RX_LINKB_ACCCMP_RIS V_RX_LINKB_ACCCMP_RIS(1U) + #define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_1 0x337c #define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_2 0x3380 #define A_MAC_PORT_RX_LINKB_DFE_OFFSET_EVN1_EVN2 0x3384 +#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_CHANNEL 0x3384 #define A_MAC_PORT_RX_LINKB_DFE_OFFSET_ODD1_ODD2 0x3388 
+#define A_MAC_PORT_RX_LINKB_DFE_OFFSET_VALUE 0x3388 #define A_MAC_PORT_RX_LINKB_DFE_OFFSET_EVN3_EVN4 0x338c +#define A_MAC_PORT_RX_LINKB_H_COEFFICIENBT_BIST 0x338c #define A_MAC_PORT_RX_LINKB_DFE_OFFSET_ODD3_ODD4 0x3390 +#define A_MAC_PORT_RX_LINKB_AC_CAPACITOR_BIST 0x3390 + +#define S_RX_LINKB_ACCCMP_BIST 13 +#define V_RX_LINKB_ACCCMP_BIST(x) ((x) << S_RX_LINKB_ACCCMP_BIST) +#define F_RX_LINKB_ACCCMP_BIST V_RX_LINKB_ACCCMP_BIST(1U) + #define A_MAC_PORT_RX_LINKB_DFE_E0_AND_E1_OFFSET 0x3394 #define A_MAC_PORT_RX_LINKB_RECEIVER_LOFF_CONTROL 0x3398 +#define A_MAC_PORT_RX_LINKB_RECEIVER_LOFF_CONTROL_REGISTER 0x3398 #define A_MAC_PORT_RX_LINKB_RECEIVER_SIGDET_CONTROL 0x339c #define A_MAC_PORT_RX_LINKB_RECEIVER_ANALOG_CONTROL_SWITCH 0x33a0 #define A_MAC_PORT_RX_LINKB_INTEGRATOR_DAC_OFFSET 0x33a4 #define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_CONTROL 0x33a8 #define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS 0x33ac + +#define S_T6_EMMD 3 +#define M_T6_EMMD 0x3U +#define V_T6_EMMD(x) ((x) << S_T6_EMMD) +#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD) + +#define S_T6_EMBRDY 2 +#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY) +#define F_T6_EMBRDY V_T6_EMBRDY(1U) + +#define S_T6_EMBUMP 1 +#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP) +#define F_T6_EMBUMP V_T6_EMBUMP(1U) + #define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_ERROR_COUNT 0x33b0 #define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x33b4 #define A_MAC_PORT_RX_LINKB_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x33b8 +#define A_MAC_PORT_RX_LINKB_DFE_FUNCTION_CONTROL_3 0x33bc #define A_MAC_PORT_RX_LINKB_DFE_TAP_ENABLE 0x33c0 +#define A_MAC_PORT_RX_LINKB_DFE_TAP_CONTROL 0x33c0 + +#define S_RX_LINKB_INDEX_DFE_TC 0 +#define M_RX_LINKB_INDEX_DFE_TC 0xfU +#define V_RX_LINKB_INDEX_DFE_TC(x) ((x) << S_RX_LINKB_INDEX_DFE_TC) +#define G_RX_LINKB_INDEX_DFE_TC(x) (((x) >> S_RX_LINKB_INDEX_DFE_TC) & M_RX_LINKB_INDEX_DFE_TC) + #define A_MAC_PORT_RX_LINKB_DFE_H1 0x33c4 +#define A_MAC_PORT_RX_LINKB_DFE_TAP 0x33c4 + +#define 
S_RX_LINKB_INDEX_DFE_TAP 0 +#define M_RX_LINKB_INDEX_DFE_TAP 0xfU +#define V_RX_LINKB_INDEX_DFE_TAP(x) ((x) << S_RX_LINKB_INDEX_DFE_TAP) +#define G_RX_LINKB_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKB_INDEX_DFE_TAP) & M_RX_LINKB_INDEX_DFE_TAP) + #define A_MAC_PORT_RX_LINKB_DFE_H2 0x33c8 #define A_MAC_PORT_RX_LINKB_DFE_H3 0x33cc #define A_MAC_PORT_RX_LINKB_DFE_H4 0x33d0 @@ -38005,125 +54588,538 @@ #define A_MAC_PORT_RX_LINKB_DFE_H8_AND_H9 0x33dc #define A_MAC_PORT_RX_LINKB_DFE_H10_AND_H11 0x33e0 #define A_MAC_PORT_RX_LINKB_DFE_H12 0x33e4 +#define A_MAC_PORT_RX_LINKB_RECEIVER_INTERNAL_STATUS_2 0x33e4 +#define A_MAC_PORT_RX_LINKB_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x33e8 +#define A_MAC_PORT_RX_LINKB_RECEIVER_DCD_CONTROL 0x33ec +#define A_MAC_PORT_RX_LINKB_RECEIVER_DCC_CONTROL 0x33f0 + +#define S_RX_LINKB_DCCSTEP_RXCTL 10 +#define M_RX_LINKB_DCCSTEP_RXCTL 0x3U +#define V_RX_LINKB_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKB_DCCSTEP_RXCTL) +#define G_RX_LINKB_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKB_DCCSTEP_RXCTL) & M_RX_LINKB_DCCSTEP_RXCTL) + +#define S_RX_LINKB_DCCLOCK_RXCTL 8 +#define V_RX_LINKB_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKB_DCCLOCK_RXCTL) +#define F_RX_LINKB_DCCLOCK_RXCTL V_RX_LINKB_DCCLOCK_RXCTL(1U) + +#define A_MAC_PORT_RX_LINKB_RECEIVER_QCC_CONTROL 0x33f4 #define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_2 0x33f8 +#define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x33f8 #define A_MAC_PORT_RX_LINKB_RECEIVER_MACRO_TEST_CONTROL_1 0x33fc #define A_MAC_PORT_TX_LINKC_TRANSMIT_CONFIGURATION_MODE 0x3400 + +#define S_T6_T5_TX_RXLOOP 5 +#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP) +#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U) + +#define S_T6_T5_TX_BWSEL 2 +#define M_T6_T5_TX_BWSEL 0x3U +#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL) +#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_TEST_CONTROL 0x3404 + +#define S_T6_ERROR 9 +#define V_T6_ERROR(x) ((x) << S_T6_ERROR) +#define 
F_T6_ERROR V_T6_ERROR(1U) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_COEFFICIENT_CONTROL 0x3408 #define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_MODE_CONTROL 0x340c #define A_MAC_PORT_TX_LINKC_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3410 #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3414 #define A_MAC_PORT_TX_LINKC_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3418 + +#define S_T6_CALSSTN 8 +#define M_T6_CALSSTN 0x3fU +#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN) +#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN) + +#define S_T6_CALSSTP 0 +#define M_T6_CALSSTP 0x3fU +#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP) +#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x341c + +#define S_T6_DRTOL 2 +#define M_T6_DRTOL 0x7U +#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL) +#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT 0x3420 + +#define S_T6_NXTT0 0 +#define M_T6_NXTT0 0x3fU +#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0) +#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT 0x3424 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT 0x3428 + +#define S_T6_NXTT2 0 +#define M_T6_NXTT2 0x3fU +#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2) +#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2) + +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_3_COEFFICIENT 0x342c #define A_MAC_PORT_TX_LINKC_TRANSMIT_AMPLITUDE 0x3430 #define A_MAC_PORT_TX_LINKC_TRANSMIT_POLARITY 0x3434 + +#define S_T6_NXTPOL 0 +#define M_T6_NXTPOL 0xfU +#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL) +#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3438 + +#define S_T6_C0UPDT 6 +#define M_T6_C0UPDT 0x3U +#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT) +#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT) + +#define S_T6_C2UPDT 2 
+#define M_T6_C2UPDT 0x3U +#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT) +#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT) + +#define S_T6_C1UPDT 0 +#define M_T6_C1UPDT 0x3U +#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT) +#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x343c + +#define S_T6_C0STAT 6 +#define M_T6_C0STAT 0x3U +#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT) +#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT) + +#define S_T6_C2STAT 2 +#define M_T6_C2STAT 0x3U +#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT) +#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT) + +#define S_T6_C1STAT 0 +#define M_T6_C1STAT 0x3U +#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT) +#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3440 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3440 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3444 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3444 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3448 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3448 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x344c +#define A_MAC_PORT_TX_LINKC_TRANSMIT_APPLIED_TUNE_REGISTER 0x3450 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3458 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3460 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_4X_SEGMENT_APPLIED 0x3460 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3464 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_2X_SEGMENT_APPLIED 0x3464 #define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3468 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_1X_SEGMENT_APPLIED 0x3468 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x346c #define 
A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3470 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3470 #define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3474 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3474 #define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3478 #define A_MAC_PORT_TX_LINKC_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x347c + +#define S_T6_XADDR 1 +#define M_T6_XADDR 0x1fU +#define V_T6_XADDR(x) ((x) << S_T6_XADDR) +#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3480 #define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3484 #define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3488 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3488 #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x348c +#define A_MAC_PORT_TX_LINKC_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x348c #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_OVERRIDE 0x3490 #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_APPLIED 0x3494 #define A_MAC_PORT_TX_LINKC_TRANSMIT_DCC_TIME_OUT 0x3498 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AZ_CONTROL 0x349c +#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_CONTROL 0x34a0 + +#define S_T6_DCCTIMEEN 13 +#define M_T6_DCCTIMEEN 0x3U +#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN) +#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN) + +#define S_T6_DCCLOCK 11 +#define M_T6_DCCLOCK 0x3U +#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK) +#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK) + +#define S_T6_DCCOFFSET 8 +#define M_T6_DCCOFFSET 0x7U +#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET) +#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET) + +#define S_TX_LINKC_DCCSTEP_CTL 6 +#define M_TX_LINKC_DCCSTEP_CTL 0x3U +#define V_TX_LINKC_DCCSTEP_CTL(x) ((x) << S_TX_LINKC_DCCSTEP_CTL) +#define G_TX_LINKC_DCCSTEP_CTL(x) (((x) >> S_TX_LINKC_DCCSTEP_CTL) & 
M_TX_LINKC_DCCSTEP_CTL) + +#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_OVERRIDE 0x34a4 +#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_APPLIED 0x34a8 +#define A_T6_MAC_PORT_TX_LINKC_TRANSMIT_DCC_TIME_OUT 0x34ac +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SIGN_OVERRIDE 0x34c0 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_4X_OVERRIDE 0x34c8 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_2X_OVERRIDE 0x34cc +#define A_MAC_PORT_TX_LINKC_TRANSMIT_SEGMENT_1X_OVERRIDE 0x34d0 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x34d8 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x34dc +#define A_MAC_PORT_TX_LINKC_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x34e0 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_5 0x34ec #define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_4 0x34f0 + +#define S_T6_SDOVRD 0 +#define M_T6_SDOVRD 0xffffU +#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD) +#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_3 0x34f4 #define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_2 0x34f8 #define A_MAC_PORT_TX_LINKC_TRANSMIT_MACRO_TEST_CONTROL_1 0x34fc + +#define S_T6_SDOVRDEN 15 +#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN) +#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_CONFIGURATION_MODE 0x3500 + +#define S_T6_T5_TX_RXLOOP 5 +#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP) +#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U) + +#define S_T6_T5_TX_BWSEL 2 +#define M_T6_T5_TX_BWSEL 0x3U +#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL) +#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_TEST_CONTROL 0x3504 + +#define S_T6_ERROR 9 +#define V_T6_ERROR(x) ((x) << S_T6_ERROR) +#define F_T6_ERROR V_T6_ERROR(1U) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_COEFFICIENT_CONTROL 0x3508 #define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_MODE_CONTROL 0x350c 
#define A_MAC_PORT_TX_LINKD_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3510 #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3514 #define A_MAC_PORT_TX_LINKD_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3518 + +#define S_T6_CALSSTN 8 +#define M_T6_CALSSTN 0x3fU +#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN) +#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN) + +#define S_T6_CALSSTP 0 +#define M_T6_CALSSTP 0x3fU +#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP) +#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x351c + +#define S_T6_DRTOL 2 +#define M_T6_DRTOL 0x7U +#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL) +#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT 0x3520 + +#define S_T6_NXTT0 0 +#define M_T6_NXTT0 0x3fU +#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0) +#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT 0x3524 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT 0x3528 + +#define S_T6_NXTT2 0 +#define M_T6_NXTT2 0x3fU +#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2) +#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2) + +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_3_COEFFICIENT 0x352c #define A_MAC_PORT_TX_LINKD_TRANSMIT_AMPLITUDE 0x3530 #define A_MAC_PORT_TX_LINKD_TRANSMIT_POLARITY 0x3534 + +#define S_T6_NXTPOL 0 +#define M_T6_NXTPOL 0xfU +#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL) +#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3538 + +#define S_T6_C0UPDT 6 +#define M_T6_C0UPDT 0x3U +#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT) +#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT) + +#define S_T6_C2UPDT 2 +#define M_T6_C2UPDT 0x3U +#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT) +#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & M_T6_C2UPDT) + +#define S_T6_C1UPDT 0 
+#define M_T6_C1UPDT 0x3U +#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT) +#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x353c + +#define S_T6_C0STAT 6 +#define M_T6_C0STAT 0x3U +#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT) +#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT) + +#define S_T6_C2STAT 2 +#define M_T6_C2STAT 0x3U +#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT) +#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT) + +#define S_T6_C1STAT 0 +#define M_T6_C1STAT 0x3U +#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT) +#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3540 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3540 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3544 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3544 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3548 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3548 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x354c +#define A_MAC_PORT_TX_LINKD_TRANSMIT_APPLIED_TUNE_REGISTER 0x3550 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3558 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3560 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_4X_SEGMENT_APPLIED 0x3560 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3564 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_2X_SEGMENT_APPLIED 0x3564 #define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3568 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_1X_SEGMENT_APPLIED 0x3568 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x356c #define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3570 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3570 #define 
A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3574 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3574 #define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3578 #define A_MAC_PORT_TX_LINKD_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x357c + +#define S_T6_XADDR 1 +#define M_T6_XADDR 0x1fU +#define V_T6_XADDR(x) ((x) << S_T6_XADDR) +#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3580 #define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3584 #define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3588 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3588 #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x358c +#define A_MAC_PORT_TX_LINKD_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x358c #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_OVERRIDE 0x3590 #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_APPLIED 0x3594 #define A_MAC_PORT_TX_LINKD_TRANSMIT_DCC_TIME_OUT 0x3598 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AZ_CONTROL 0x359c +#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_CONTROL 0x35a0 + +#define S_T6_DCCTIMEEN 13 +#define M_T6_DCCTIMEEN 0x3U +#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN) +#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN) + +#define S_T6_DCCLOCK 11 +#define M_T6_DCCLOCK 0x3U +#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK) +#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK) + +#define S_T6_DCCOFFSET 8 +#define M_T6_DCCOFFSET 0x7U +#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET) +#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET) + +#define S_TX_LINKD_DCCSTEP_CTL 6 +#define M_TX_LINKD_DCCSTEP_CTL 0x3U +#define V_TX_LINKD_DCCSTEP_CTL(x) ((x) << S_TX_LINKD_DCCSTEP_CTL) +#define G_TX_LINKD_DCCSTEP_CTL(x) (((x) >> S_TX_LINKD_DCCSTEP_CTL) & M_TX_LINKD_DCCSTEP_CTL) + +#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_OVERRIDE 0x35a4 +#define A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_APPLIED 0x35a8 +#define 
A_T6_MAC_PORT_TX_LINKD_TRANSMIT_DCC_TIME_OUT 0x35ac +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SIGN_OVERRIDE 0x35c0 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_4X_OVERRIDE 0x35c8 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_2X_OVERRIDE 0x35cc +#define A_MAC_PORT_TX_LINKD_TRANSMIT_SEGMENT_1X_OVERRIDE 0x35d0 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x35d8 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x35dc +#define A_MAC_PORT_TX_LINKD_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x35e0 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_5 0x35ec #define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_4 0x35f0 + +#define S_T6_SDOVRD 0 +#define M_T6_SDOVRD 0xffffU +#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD) +#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_3 0x35f4 #define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_2 0x35f8 #define A_MAC_PORT_TX_LINKD_TRANSMIT_MACRO_TEST_CONTROL_1 0x35fc + +#define S_T6_SDOVRDEN 15 +#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN) +#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U) + #define A_MAC_PORT_RX_LINKC_RECEIVER_CONFIGURATION_MODE 0x3600 #define A_MAC_PORT_RX_LINKC_RECEIVER_TEST_CONTROL 0x3604 #define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_CONTROL 0x3608 #define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_OFFSET_CONTROL 0x360c + +#define S_T6_TMSCAL 8 +#define M_T6_TMSCAL 0x3U +#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL) +#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL) + +#define S_T6_APADJ 7 +#define V_T6_APADJ(x) ((x) << S_T6_APADJ) +#define F_T6_APADJ V_T6_APADJ(1U) + +#define S_T6_RSEL 6 +#define V_T6_RSEL(x) ((x) << S_T6_RSEL) +#define F_T6_RSEL V_T6_RSEL(1U) + +#define S_T6_PHOFFS 0 +#define M_T6_PHOFFS 0x3fU +#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS) +#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS) + #define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_1 0x3610 #define 
A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_POSITION_2 0x3614 #define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3618 #define A_MAC_PORT_RX_LINKC_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x361c #define A_MAC_PORT_RX_LINKC_DFE_CONTROL 0x3620 + +#define S_T6_SPIFMT 8 +#define M_T6_SPIFMT 0xfU +#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT) +#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT) + #define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_1 0x3624 #define A_MAC_PORT_RX_LINKC_DFE_SAMPLE_SNAPSHOT_2 0x3628 #define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_1 0x362c + +#define S_T6_WRAPSEL 15 +#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL) +#define F_T6_WRAPSEL V_T6_WRAPSEL(1U) + +#define S_T6_PEAK 9 +#define M_T6_PEAK 0x1fU +#define V_T6_PEAK(x) ((x) << S_T6_PEAK) +#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK) + #define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_2 0x3630 + +#define S_T6_T5VGAIN 0 +#define M_T6_T5VGAIN 0x7fU +#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN) +#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN) + #define A_MAC_PORT_RX_LINKC_RECEIVER_VGA_CONTROL_3 0x3634 #define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_1 0x3638 +#define A_MAC_PORT_RX_LINKC_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3638 +#define A_MAC_PORT_RX_LINKC_RECEIVER_IQAMP_CONTROL_1 0x363c #define A_MAC_PORT_RX_LINKC_RECEIVER_DQCC_CONTROL_3 0x3640 +#define A_MAC_PORT_RX_LINKC_RECEIVER_IQAMP_CONTROL_2 0x3640 +#define A_MAC_PORT_RX_LINKC_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3644 #define A_MAC_PORT_RX_LINKC_RECEIVER_DACAP_AND_DACAN 0x3648 #define A_MAC_PORT_RX_LINKC_RECEIVER_DACA_MIN_AND_DACAZ 0x364c +#define A_MAC_PORT_RX_LINKC_RECEIVER_DACA_MIN 0x364c #define A_MAC_PORT_RX_LINKC_RECEIVER_ADAC_CONTROL 0x3650 +#define A_MAC_PORT_RX_LINKC_RECEIVER_AC_COUPLING_CONTROL 0x3654 +#define A_MAC_PORT_RX_LINKC_RECEIVER_AC_COUPLING_VALUE 0x3658 #define A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x365c +#define A_MAC_PORT_RX_LINKC_DFE_H1H2H3_LOCAL_OFFSET 0x365c #define 
A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3660 +#define A_MAC_PORT_RX_LINKC_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3660 #define A_MAC_PORT_RX_LINKC_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3664 +#define A_MAC_PORT_RX_LINKC_PEAKED_INTEGRATOR 0x3664 +#define A_MAC_PORT_RX_LINKC_CDR_ANALOG_SWITCH 0x3668 +#define A_MAC_PORT_RX_LINKC_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x366c #define A_MAC_PORT_RX_LINKC_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3670 #define A_MAC_PORT_RX_LINKC_DYNAMIC_DATA_CENTERING_DDC 0x3674 + +#define S_T6_ODEC 0 +#define M_T6_ODEC 0xfU +#define V_T6_ODEC(x) ((x) << S_T6_ODEC) +#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC) + #define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS 0x3678 + +#define S_RX_LINKC_ACCCMP_RIS 11 +#define V_RX_LINKC_ACCCMP_RIS(x) ((x) << S_RX_LINKC_ACCCMP_RIS) +#define F_RX_LINKC_ACCCMP_RIS V_RX_LINKC_ACCCMP_RIS(1U) + #define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_1 0x367c #define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_2 0x3680 #define A_MAC_PORT_RX_LINKC_DFE_OFFSET_EVN1_EVN2 0x3684 +#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_CHANNEL 0x3684 #define A_MAC_PORT_RX_LINKC_DFE_OFFSET_ODD1_ODD2 0x3688 +#define A_MAC_PORT_RX_LINKC_DFE_OFFSET_VALUE 0x3688 #define A_MAC_PORT_RX_LINKC_DFE_OFFSET_EVN3_EVN4 0x368c +#define A_MAC_PORT_RX_LINKC_H_COEFFICIENBT_BIST 0x368c #define A_MAC_PORT_RX_LINKC_DFE_OFFSET_ODD3_ODD4 0x3690 +#define A_MAC_PORT_RX_LINKC_AC_CAPACITOR_BIST 0x3690 + +#define S_RX_LINKC_ACCCMP_BIST 13 +#define V_RX_LINKC_ACCCMP_BIST(x) ((x) << S_RX_LINKC_ACCCMP_BIST) +#define F_RX_LINKC_ACCCMP_BIST V_RX_LINKC_ACCCMP_BIST(1U) + #define A_MAC_PORT_RX_LINKC_DFE_E0_AND_E1_OFFSET 0x3694 #define A_MAC_PORT_RX_LINKC_RECEIVER_LOFF_CONTROL 0x3698 +#define A_MAC_PORT_RX_LINKC_RECEIVER_LOFF_CONTROL_REGISTER 0x3698 #define A_MAC_PORT_RX_LINKC_RECEIVER_SIGDET_CONTROL 0x369c #define A_MAC_PORT_RX_LINKC_RECEIVER_ANALOG_CONTROL_SWITCH 0x36a0 #define A_MAC_PORT_RX_LINKC_INTEGRATOR_DAC_OFFSET 0x36a4 #define 
A_MAC_PORT_RX_LINKC_DIGITAL_EYE_CONTROL 0x36a8 #define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS 0x36ac + +#define S_T6_EMMD 3 +#define M_T6_EMMD 0x3U +#define V_T6_EMMD(x) ((x) << S_T6_EMMD) +#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD) + +#define S_T6_EMBRDY 2 +#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY) +#define F_T6_EMBRDY V_T6_EMBRDY(1U) + +#define S_T6_EMBUMP 1 +#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP) +#define F_T6_EMBUMP V_T6_EMBUMP(1U) + #define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_ERROR_COUNT 0x36b0 #define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x36b4 #define A_MAC_PORT_RX_LINKC_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x36b8 +#define A_MAC_PORT_RX_LINKC_DFE_FUNCTION_CONTROL_3 0x36bc #define A_MAC_PORT_RX_LINKC_DFE_TAP_ENABLE 0x36c0 +#define A_MAC_PORT_RX_LINKC_DFE_TAP_CONTROL 0x36c0 + +#define S_RX_LINKC_INDEX_DFE_TC 0 +#define M_RX_LINKC_INDEX_DFE_TC 0xfU +#define V_RX_LINKC_INDEX_DFE_TC(x) ((x) << S_RX_LINKC_INDEX_DFE_TC) +#define G_RX_LINKC_INDEX_DFE_TC(x) (((x) >> S_RX_LINKC_INDEX_DFE_TC) & M_RX_LINKC_INDEX_DFE_TC) + #define A_MAC_PORT_RX_LINKC_DFE_H1 0x36c4 +#define A_MAC_PORT_RX_LINKC_DFE_TAP 0x36c4 + +#define S_RX_LINKC_INDEX_DFE_TAP 0 +#define M_RX_LINKC_INDEX_DFE_TAP 0xfU +#define V_RX_LINKC_INDEX_DFE_TAP(x) ((x) << S_RX_LINKC_INDEX_DFE_TAP) +#define G_RX_LINKC_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKC_INDEX_DFE_TAP) & M_RX_LINKC_INDEX_DFE_TAP) + #define A_MAC_PORT_RX_LINKC_DFE_H2 0x36c8 #define A_MAC_PORT_RX_LINKC_DFE_H3 0x36cc #define A_MAC_PORT_RX_LINKC_DFE_H4 0x36d0 @@ -38132,51 +55128,170 @@ #define A_MAC_PORT_RX_LINKC_DFE_H8_AND_H9 0x36dc #define A_MAC_PORT_RX_LINKC_DFE_H10_AND_H11 0x36e0 #define A_MAC_PORT_RX_LINKC_DFE_H12 0x36e4 +#define A_MAC_PORT_RX_LINKC_RECEIVER_INTERNAL_STATUS_2 0x36e4 +#define A_MAC_PORT_RX_LINKC_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x36e8 +#define A_MAC_PORT_RX_LINKC_RECEIVER_DCD_CONTROL 0x36ec +#define A_MAC_PORT_RX_LINKC_RECEIVER_DCC_CONTROL 0x36f0 + +#define S_RX_LINKC_DCCSTEP_RXCTL 10 +#define 
M_RX_LINKC_DCCSTEP_RXCTL 0x3U +#define V_RX_LINKC_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKC_DCCSTEP_RXCTL) +#define G_RX_LINKC_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKC_DCCSTEP_RXCTL) & M_RX_LINKC_DCCSTEP_RXCTL) + +#define S_RX_LINKC_DCCLOCK_RXCTL 8 +#define V_RX_LINKC_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKC_DCCLOCK_RXCTL) +#define F_RX_LINKC_DCCLOCK_RXCTL V_RX_LINKC_DCCLOCK_RXCTL(1U) + +#define A_MAC_PORT_RX_LINKC_RECEIVER_QCC_CONTROL 0x36f4 #define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_2 0x36f8 +#define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x36f8 #define A_MAC_PORT_RX_LINKC_RECEIVER_MACRO_TEST_CONTROL_1 0x36fc #define A_MAC_PORT_RX_LINKD_RECEIVER_CONFIGURATION_MODE 0x3700 #define A_MAC_PORT_RX_LINKD_RECEIVER_TEST_CONTROL 0x3704 #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_CONTROL 0x3708 #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_OFFSET_CONTROL 0x370c + +#define S_T6_TMSCAL 8 +#define M_T6_TMSCAL 0x3U +#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL) +#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL) + +#define S_T6_APADJ 7 +#define V_T6_APADJ(x) ((x) << S_T6_APADJ) +#define F_T6_APADJ V_T6_APADJ(1U) + +#define S_T6_RSEL 6 +#define V_T6_RSEL(x) ((x) << S_T6_RSEL) +#define F_T6_RSEL V_T6_RSEL(1U) + +#define S_T6_PHOFFS 0 +#define M_T6_PHOFFS 0x3fU +#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS) +#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS) + #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_1 0x3710 #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_POSITION_2 0x3714 #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3718 #define A_MAC_PORT_RX_LINKD_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x371c #define A_MAC_PORT_RX_LINKD_DFE_CONTROL 0x3720 + +#define S_T6_SPIFMT 8 +#define M_T6_SPIFMT 0xfU +#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT) +#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT) + #define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_1 0x3724 #define A_MAC_PORT_RX_LINKD_DFE_SAMPLE_SNAPSHOT_2 0x3728 #define 
A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_1 0x372c + +#define S_T6_WRAPSEL 15 +#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL) +#define F_T6_WRAPSEL V_T6_WRAPSEL(1U) + +#define S_T6_PEAK 9 +#define M_T6_PEAK 0x1fU +#define V_T6_PEAK(x) ((x) << S_T6_PEAK) +#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK) + #define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_2 0x3730 + +#define S_T6_T5VGAIN 0 +#define M_T6_T5VGAIN 0x7fU +#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN) +#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN) + #define A_MAC_PORT_RX_LINKD_RECEIVER_VGA_CONTROL_3 0x3734 #define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_1 0x3738 +#define A_MAC_PORT_RX_LINKD_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3738 +#define A_MAC_PORT_RX_LINKD_RECEIVER_IQAMP_CONTROL_1 0x373c #define A_MAC_PORT_RX_LINKD_RECEIVER_DQCC_CONTROL_3 0x3740 +#define A_MAC_PORT_RX_LINKD_RECEIVER_IQAMP_CONTROL_2 0x3740 +#define A_MAC_PORT_RX_LINKD_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3744 #define A_MAC_PORT_RX_LINKD_RECEIVER_DACAP_AND_DACAN 0x3748 #define A_MAC_PORT_RX_LINKD_RECEIVER_DACA_MIN_AND_DACAZ 0x374c +#define A_MAC_PORT_RX_LINKD_RECEIVER_DACA_MIN 0x374c #define A_MAC_PORT_RX_LINKD_RECEIVER_ADAC_CONTROL 0x3750 +#define A_MAC_PORT_RX_LINKD_RECEIVER_AC_COUPLING_CONTROL 0x3754 +#define A_MAC_PORT_RX_LINKD_RECEIVER_AC_COUPLING_VALUE 0x3758 #define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x375c +#define A_MAC_PORT_RX_LINKD_DFE_H1H2H3_LOCAL_OFFSET 0x375c #define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3760 +#define A_MAC_PORT_RX_LINKD_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3760 #define A_MAC_PORT_RX_LINKD_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3764 +#define A_MAC_PORT_RX_LINKD_PEAKED_INTEGRATOR 0x3764 +#define A_MAC_PORT_RX_LINKD_CDR_ANALOG_SWITCH 0x3768 +#define A_MAC_PORT_RX_LINKD_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x376c #define A_MAC_PORT_RX_LINKD_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3770 #define A_MAC_PORT_RX_LINKD_DYNAMIC_DATA_CENTERING_DDC 
0x3774 + +#define S_T6_ODEC 0 +#define M_T6_ODEC 0xfU +#define V_T6_ODEC(x) ((x) << S_T6_ODEC) +#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC) + #define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS 0x3778 + +#define S_RX_LINKD_ACCCMP_RIS 11 +#define V_RX_LINKD_ACCCMP_RIS(x) ((x) << S_RX_LINKD_ACCCMP_RIS) +#define F_RX_LINKD_ACCCMP_RIS V_RX_LINKD_ACCCMP_RIS(1U) + #define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_1 0x377c #define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_2 0x3780 #define A_MAC_PORT_RX_LINKD_DFE_OFFSET_EVN1_EVN2 0x3784 +#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_CHANNEL 0x3784 #define A_MAC_PORT_RX_LINKD_DFE_OFFSET_ODD1_ODD2 0x3788 +#define A_MAC_PORT_RX_LINKD_DFE_OFFSET_VALUE 0x3788 #define A_MAC_PORT_RX_LINKD_DFE_OFFSET_EVN3_EVN4 0x378c +#define A_MAC_PORT_RX_LINKD_H_COEFFICIENBT_BIST 0x378c #define A_MAC_PORT_RX_LINKD_DFE_OFFSET_ODD3_ODD4 0x3790 +#define A_MAC_PORT_RX_LINKD_AC_CAPACITOR_BIST 0x3790 + +#define S_RX_LINKD_ACCCMP_BIST 13 +#define V_RX_LINKD_ACCCMP_BIST(x) ((x) << S_RX_LINKD_ACCCMP_BIST) +#define F_RX_LINKD_ACCCMP_BIST V_RX_LINKD_ACCCMP_BIST(1U) + #define A_MAC_PORT_RX_LINKD_DFE_E0_AND_E1_OFFSET 0x3794 #define A_MAC_PORT_RX_LINKD_RECEIVER_LOFF_CONTROL 0x3798 +#define A_MAC_PORT_RX_LINKD_RECEIVER_LOFF_CONTROL_REGISTER 0x3798 #define A_MAC_PORT_RX_LINKD_RECEIVER_SIGDET_CONTROL 0x379c #define A_MAC_PORT_RX_LINKD_RECEIVER_ANALOG_CONTROL_SWITCH 0x37a0 #define A_MAC_PORT_RX_LINKD_INTEGRATOR_DAC_OFFSET 0x37a4 #define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_CONTROL 0x37a8 #define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS 0x37ac + +#define S_T6_EMMD 3 +#define M_T6_EMMD 0x3U +#define V_T6_EMMD(x) ((x) << S_T6_EMMD) +#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD) + +#define S_T6_EMBRDY 2 +#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY) +#define F_T6_EMBRDY V_T6_EMBRDY(1U) + +#define S_T6_EMBUMP 1 +#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP) +#define F_T6_EMBUMP V_T6_EMBUMP(1U) + #define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_ERROR_COUNT 0x37b0 
#define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x37b4 #define A_MAC_PORT_RX_LINKD_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x37b8 +#define A_MAC_PORT_RX_LINKD_DFE_FUNCTION_CONTROL_3 0x37bc #define A_MAC_PORT_RX_LINKD_DFE_TAP_ENABLE 0x37c0 +#define A_MAC_PORT_RX_LINKD_DFE_TAP_CONTROL 0x37c0 + +#define S_RX_LINKD_INDEX_DFE_TC 0 +#define M_RX_LINKD_INDEX_DFE_TC 0xfU +#define V_RX_LINKD_INDEX_DFE_TC(x) ((x) << S_RX_LINKD_INDEX_DFE_TC) +#define G_RX_LINKD_INDEX_DFE_TC(x) (((x) >> S_RX_LINKD_INDEX_DFE_TC) & M_RX_LINKD_INDEX_DFE_TC) + #define A_MAC_PORT_RX_LINKD_DFE_H1 0x37c4 +#define A_MAC_PORT_RX_LINKD_DFE_TAP 0x37c4 + +#define S_RX_LINKD_INDEX_DFE_TAP 0 +#define M_RX_LINKD_INDEX_DFE_TAP 0xfU +#define V_RX_LINKD_INDEX_DFE_TAP(x) ((x) << S_RX_LINKD_INDEX_DFE_TAP) +#define G_RX_LINKD_INDEX_DFE_TAP(x) (((x) >> S_RX_LINKD_INDEX_DFE_TAP) & M_RX_LINKD_INDEX_DFE_TAP) + #define A_MAC_PORT_RX_LINKD_DFE_H2 0x37c8 #define A_MAC_PORT_RX_LINKD_DFE_H3 0x37cc #define A_MAC_PORT_RX_LINKD_DFE_H4 0x37d0 @@ -38185,7 +55300,23 @@ #define A_MAC_PORT_RX_LINKD_DFE_H8_AND_H9 0x37dc #define A_MAC_PORT_RX_LINKD_DFE_H10_AND_H11 0x37e0 #define A_MAC_PORT_RX_LINKD_DFE_H12 0x37e4 +#define A_MAC_PORT_RX_LINKD_RECEIVER_INTERNAL_STATUS_2 0x37e4 +#define A_MAC_PORT_RX_LINKD_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x37e8 +#define A_MAC_PORT_RX_LINKD_RECEIVER_DCD_CONTROL 0x37ec +#define A_MAC_PORT_RX_LINKD_RECEIVER_DCC_CONTROL 0x37f0 + +#define S_RX_LINKD_DCCSTEP_RXCTL 10 +#define M_RX_LINKD_DCCSTEP_RXCTL 0x3U +#define V_RX_LINKD_DCCSTEP_RXCTL(x) ((x) << S_RX_LINKD_DCCSTEP_RXCTL) +#define G_RX_LINKD_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINKD_DCCSTEP_RXCTL) & M_RX_LINKD_DCCSTEP_RXCTL) + +#define S_RX_LINKD_DCCLOCK_RXCTL 8 +#define V_RX_LINKD_DCCLOCK_RXCTL(x) ((x) << S_RX_LINKD_DCCLOCK_RXCTL) +#define F_RX_LINKD_DCCLOCK_RXCTL V_RX_LINKD_DCCLOCK_RXCTL(1U) + +#define A_MAC_PORT_RX_LINKD_RECEIVER_QCC_CONTROL 0x37f4 #define A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_2 0x37f8 +#define 
A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x37f8 #define A_MAC_PORT_RX_LINKD_RECEIVER_MACRO_TEST_CONTROL_1 0x37fc #define A_MAC_PORT_ANALOG_TEST_MUX 0x3814 #define A_MAC_PORT_BANDGAP_CONTROL 0x382c @@ -38195,6 +55326,52 @@ #define V_T5BGCTL(x) ((x) << S_T5BGCTL) #define G_T5BGCTL(x) (((x) >> S_T5BGCTL) & M_T5BGCTL) +#define A_MAC_PORT_PLLREFSEL_CONTROL 0x3854 + +#define S_REFSEL 0 +#define M_REFSEL 0x7U +#define V_REFSEL(x) ((x) << S_REFSEL) +#define G_REFSEL(x) (((x) >> S_REFSEL) & M_REFSEL) + +#define A_MAC_PORT_REFISINK_CONTROL 0x3858 + +#define S_REFISINK 0 +#define M_REFISINK 0x3fU +#define V_REFISINK(x) ((x) << S_REFISINK) +#define G_REFISINK(x) (((x) >> S_REFISINK) & M_REFISINK) + +#define A_MAC_PORT_REFISRC_CONTROL 0x385c + +#define S_REFISRC 0 +#define M_REFISRC 0x3fU +#define V_REFISRC(x) ((x) << S_REFISRC) +#define G_REFISRC(x) (((x) >> S_REFISRC) & M_REFISRC) + +#define A_MAC_PORT_REFVREG_CONTROL 0x3860 + +#define S_REFVREG 0 +#define M_REFVREG 0x3fU +#define V_REFVREG(x) ((x) << S_REFVREG) +#define G_REFVREG(x) (((x) >> S_REFVREG) & M_REFVREG) + +#define A_MAC_PORT_VBGENDOC_CONTROL 0x3864 + +#define S_BGCLKSEL 2 +#define V_BGCLKSEL(x) ((x) << S_BGCLKSEL) +#define F_BGCLKSEL V_BGCLKSEL(1U) + +#define S_VBGENDOC 0 +#define M_VBGENDOC 0x3U +#define V_VBGENDOC(x) ((x) << S_VBGENDOC) +#define G_VBGENDOC(x) (((x) >> S_VBGENDOC) & M_VBGENDOC) + +#define A_MAC_PORT_VREFTUNE_CONTROL 0x3868 + +#define S_VREFTUNE 0 +#define M_VREFTUNE 0xfU +#define V_VREFTUNE(x) ((x) << S_VREFTUNE) +#define G_VREFTUNE(x) (((x) >> S_VREFTUNE) & M_VREFTUNE) + #define A_MAC_PORT_RESISTOR_CALIBRATION_CONTROL 0x3880 #define S_RCCTL1 5 @@ -38221,6 +55398,24 @@ #define V_RCRST(x) ((x) << S_RCRST) #define F_RCRST V_RCRST(1U) +#define A_MAC_PORT_IMPEDENCE_CALIBRATION_CONTROL 0x3880 + +#define S_FRCCAL_COMP 6 +#define V_FRCCAL_COMP(x) ((x) << S_FRCCAL_COMP) +#define F_FRCCAL_COMP V_FRCCAL_COMP(1U) + +#define S_IC_FRCERR 5 +#define V_IC_FRCERR(x) ((x) << S_IC_FRCERR) 
+#define F_IC_FRCERR V_IC_FRCERR(1U) + +#define S_CAL_BISTENAB 4 +#define V_CAL_BISTENAB(x) ((x) << S_CAL_BISTENAB) +#define F_CAL_BISTENAB V_CAL_BISTENAB(1U) + +#define S_RCAL_RESET 0 +#define V_RCAL_RESET(x) ((x) << S_RCAL_RESET) +#define F_RCAL_RESET V_RCAL_RESET(1U) + #define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_1 0x3884 #define S_RCERR 1 @@ -38231,6 +55426,24 @@ #define V_RCCOMP(x) ((x) << S_RCCOMP) #define F_RCCOMP V_RCCOMP(1U) +#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_1 0x3884 + +#define S_RCALBENAB 3 +#define V_RCALBENAB(x) ((x) << S_RCALBENAB) +#define F_RCALBENAB V_RCALBENAB(1U) + +#define S_RCALBUSY 2 +#define V_RCALBUSY(x) ((x) << S_RCALBUSY) +#define F_RCALBUSY V_RCALBUSY(1U) + +#define S_RCALERR 1 +#define V_RCALERR(x) ((x) << S_RCALERR) +#define F_RCALERR V_RCALERR(1U) + +#define S_RCALCOMP 0 +#define V_RCALCOMP(x) ((x) << S_RCALCOMP) +#define F_RCALCOMP V_RCALCOMP(1U) + #define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_2 0x3888 #define S_RESREG2 0 @@ -38238,6 +55451,13 @@ #define V_RESREG2(x) ((x) << S_RESREG2) #define G_RESREG2(x) (((x) >> S_RESREG2) & M_RESREG2) +#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_2 0x3888 + +#define S_T6_RESREG2 0 +#define M_T6_RESREG2 0x3fU +#define V_T6_RESREG2(x) ((x) << S_T6_RESREG2) +#define G_T6_RESREG2(x) (((x) >> S_T6_RESREG2) & M_T6_RESREG2) + #define A_MAC_PORT_RESISTOR_CALIBRATION_STATUS_3 0x388c #define S_RESREG3 0 @@ -38245,6 +55465,69 @@ #define V_RESREG3(x) ((x) << S_RESREG3) #define G_RESREG3(x) (((x) >> S_RESREG3) & M_RESREG3) +#define A_MAC_PORT_IMPEDENCE_CALIBRATION_STATUS_3 0x388c + +#define S_T6_RESREG3 0 +#define M_T6_RESREG3 0x3fU +#define V_T6_RESREG3(x) ((x) << S_T6_RESREG3) +#define G_T6_RESREG3(x) (((x) >> S_T6_RESREG3) & M_T6_RESREG3) + +#define A_MAC_PORT_INEQUALITY_CONTROL_AND_RESULT 0x38c0 + +#define S_ISGT 7 +#define V_ISGT(x) ((x) << S_ISGT) +#define F_ISGT V_ISGT(1U) + +#define S_ISLT 6 +#define V_ISLT(x) ((x) << S_ISLT) +#define F_ISLT V_ISLT(1U) + +#define S_ISEQ 5 +#define 
V_ISEQ(x) ((x) << S_ISEQ) +#define F_ISEQ V_ISEQ(1U) + +#define S_ISVAL 3 +#define M_ISVAL 0x3U +#define V_ISVAL(x) ((x) << S_ISVAL) +#define G_ISVAL(x) (((x) >> S_ISVAL) & M_ISVAL) + +#define S_GTORLT 1 +#define M_GTORLT 0x3U +#define V_GTORLT(x) ((x) << S_GTORLT) +#define G_GTORLT(x) (((x) >> S_GTORLT) & M_GTORLT) + +#define S_INEQ 0 +#define V_INEQ(x) ((x) << S_INEQ) +#define F_INEQ V_INEQ(1U) + +#define A_MAC_PORT_INEQUALITY_LOW_LIMIT 0x38c4 + +#define S_LLIM 0 +#define M_LLIM 0xffffU +#define V_LLIM(x) ((x) << S_LLIM) +#define G_LLIM(x) (((x) >> S_LLIM) & M_LLIM) + +#define A_MAC_PORT_INEQUALITY_LOW_LIMIT_MASK 0x38c8 + +#define S_LMSK 0 +#define M_LMSK 0xffffU +#define V_LMSK(x) ((x) << S_LMSK) +#define G_LMSK(x) (((x) >> S_LMSK) & M_LMSK) + +#define A_MAC_PORT_INEQUALITY_HIGH_LIMIT 0x38cc + +#define S_HLIM 0 +#define M_HLIM 0xffffU +#define V_HLIM(x) ((x) << S_HLIM) +#define G_HLIM(x) (((x) >> S_HLIM) & M_HLIM) + +#define A_MAC_PORT_INEQUALITY_HIGH_LIMIT_MASK 0x38d0 + +#define S_HMSK 0 +#define M_HMSK 0xffffU +#define V_HMSK(x) ((x) << S_HMSK) +#define G_HMSK(x) (((x) >> S_HMSK) & M_HMSK) + #define A_MAC_PORT_MACRO_TEST_CONTROL_6 0x38e8 #define S_LBIST 7 @@ -38310,85 +55593,335 @@ #define F_MACROTEST V_MACROTEST(1U) #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_CONFIGURATION_MODE 0x3900 + +#define S_T6_T5_TX_RXLOOP 5 +#define V_T6_T5_TX_RXLOOP(x) ((x) << S_T6_T5_TX_RXLOOP) +#define F_T6_T5_TX_RXLOOP V_T6_T5_TX_RXLOOP(1U) + +#define S_T6_T5_TX_BWSEL 2 +#define M_T6_T5_TX_BWSEL 0x3U +#define V_T6_T5_TX_BWSEL(x) ((x) << S_T6_T5_TX_BWSEL) +#define G_T6_T5_TX_BWSEL(x) (((x) >> S_T6_T5_TX_BWSEL) & M_T6_T5_TX_BWSEL) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TEST_CONTROL 0x3904 + +#define S_T6_ERROR 9 +#define V_T6_ERROR(x) ((x) << S_T6_ERROR) +#define F_T6_ERROR V_T6_ERROR(1U) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_COEFFICIENT_CONTROL 0x3908 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_MODE_CONTROL 0x390c #define 
A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DRIVER_OVERRIDE_CONTROL 0x3910 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_ROTATOR_OVERRIDE 0x3914 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_IMPEDANCE_CALIBRATION_OVERRIDE 0x3918 + +#define S_T6_CALSSTN 8 +#define M_T6_CALSSTN 0x3fU +#define V_T6_CALSSTN(x) ((x) << S_T6_CALSSTN) +#define G_T6_CALSSTN(x) (((x) >> S_T6_CALSSTN) & M_T6_CALSSTN) + +#define S_T6_CALSSTP 0 +#define M_T6_CALSSTP 0x3fU +#define V_T6_CALSSTP(x) ((x) << S_T6_CALSSTP) +#define G_T6_CALSSTP(x) (((x) >> S_T6_CALSSTP) & M_T6_CALSSTP) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCLK_DRIFT_TOLERANCE 0x391c + +#define S_T6_DRTOL 2 +#define M_T6_DRTOL 0x7U +#define V_T6_DRTOL(x) ((x) << S_T6_DRTOL) +#define G_T6_DRTOL(x) (((x) >> S_T6_DRTOL) & M_T6_DRTOL) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT 0x3920 + +#define S_T6_NXTT0 0 +#define M_T6_NXTT0 0x3fU +#define V_T6_NXTT0(x) ((x) << S_T6_NXTT0) +#define G_T6_NXTT0(x) (((x) >> S_T6_NXTT0) & M_T6_NXTT0) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT 0x3924 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT 0x3928 + +#define S_T6_NXTT2 0 +#define M_T6_NXTT2 0x3fU +#define V_T6_NXTT2(x) ((x) << S_T6_NXTT2) +#define G_T6_NXTT2(x) (((x) >> S_T6_NXTT2) & M_T6_NXTT2) + +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_3_COEFFICIENT 0x392c #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AMPLITUDE 0x3930 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_POLARITY 0x3934 + +#define S_T6_NXTPOL 0 +#define M_T6_NXTPOL 0xfU +#define V_T6_NXTPOL(x) ((x) << S_T6_NXTPOL) +#define G_T6_NXTPOL(x) (((x) >> S_T6_NXTPOL) & M_T6_NXTPOL) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_COMMAND 0x3938 + +#define S_T6_C0UPDT 6 +#define M_T6_C0UPDT 0x3U +#define V_T6_C0UPDT(x) ((x) << S_T6_C0UPDT) +#define G_T6_C0UPDT(x) (((x) >> S_T6_C0UPDT) & M_T6_C0UPDT) + +#define S_T6_C2UPDT 2 +#define M_T6_C2UPDT 0x3U +#define V_T6_C2UPDT(x) ((x) << S_T6_C2UPDT) +#define G_T6_C2UPDT(x) (((x) >> S_T6_C2UPDT) & 
M_T6_C2UPDT) + +#define S_T6_C1UPDT 0 +#define M_T6_C1UPDT 0x3U +#define V_T6_C1UPDT(x) ((x) << S_T6_C1UPDT) +#define G_T6_C1UPDT(x) (((x) >> S_T6_C1UPDT) & M_T6_C1UPDT) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_ADAPTIVE_EQUALIZATION_STATUS 0x393c + +#define S_T6_C0STAT 6 +#define M_T6_C0STAT 0x3U +#define V_T6_C0STAT(x) ((x) << S_T6_C0STAT) +#define G_T6_C0STAT(x) (((x) >> S_T6_C0STAT) & M_T6_C0STAT) + +#define S_T6_C2STAT 2 +#define M_T6_C2STAT 0x3U +#define V_T6_C2STAT(x) ((x) << S_T6_C2STAT) +#define G_T6_C2STAT(x) (((x) >> S_T6_C2STAT) & M_T6_C2STAT) + +#define S_T6_C1STAT 0 +#define M_T6_C1STAT 0x3U +#define V_T6_C1STAT(x) ((x) << S_T6_C1STAT) +#define G_T6_C1STAT(x) (((x) >> S_T6_C1STAT) & M_T6_C1STAT) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_OVERRIDE 0x3940 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_0_COEFFICIENT_OVERRIDE 0x3940 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_OVERRIDE 0x3944 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_1_COEFFICIENT_OVERRIDE 0x3944 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT_OVERRIDE 0x3948 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_2_COEFFICIENT_OVERRIDE 0x3948 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_TAP_3_COEFFICIENT_OVERRIDE 0x394c +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_APPLIED_TUNE_REGISTER 0x3950 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_ANALOG_DIAGNOSTICS_REGISTER 0x3958 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_0_COEFFICIENT_APPLIED 0x3960 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_4X_SEGMENT_APPLIED 0x3960 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_1_COEFFICIENT_APPLIED 0x3964 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_2X_SEGMENT_APPLIED 0x3964 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_2_COEFFICIENT_APPLIED 0x3968 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_1X_SEGMENT_APPLIED 0x3968 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_4X_TERMINATION_APPLIED 0x396c #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_DISABLE_APPLIED_1 0x3970 
+#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_2X1X_TERMINATION_APPLIED 0x3970 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_DISABLE_APPLIED_2 0x3974 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_APPLIED_REGISTER 0x3974 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_DATA 0x3978 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_EXTENDED_ADDRESS_ADDR 0x397c + +#define S_T6_XADDR 1 +#define M_T6_XADDR 0x1fU +#define V_T6_XADDR(x) ((x) << S_T6_XADDR) +#define G_T6_XADDR(x) (((x) >> S_T6_XADDR) & M_T6_XADDR) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_1_0 0x3980 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_3_2 0x3984 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTE_4 0x3988 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_5_4 0x3988 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x398c +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_PATTERN_BUFFER_BYTES_7_6 0x398c #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_OVERRIDE 0x3990 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_APPLIED 0x3994 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_TIME_OUT 0x3998 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AZ_CONTROL 0x399c +#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_CONTROL 0x39a0 + +#define S_T6_DCCTIMEEN 13 +#define M_T6_DCCTIMEEN 0x3U +#define V_T6_DCCTIMEEN(x) ((x) << S_T6_DCCTIMEEN) +#define G_T6_DCCTIMEEN(x) (((x) >> S_T6_DCCTIMEEN) & M_T6_DCCTIMEEN) + +#define S_T6_DCCLOCK 11 +#define M_T6_DCCLOCK 0x3U +#define V_T6_DCCLOCK(x) ((x) << S_T6_DCCLOCK) +#define G_T6_DCCLOCK(x) (((x) >> S_T6_DCCLOCK) & M_T6_DCCLOCK) + +#define S_T6_DCCOFFSET 8 +#define M_T6_DCCOFFSET 0x7U +#define V_T6_DCCOFFSET(x) ((x) << S_T6_DCCOFFSET) +#define G_T6_DCCOFFSET(x) (((x) >> S_T6_DCCOFFSET) & M_T6_DCCOFFSET) + +#define S_TX_LINK_BCST_DCCSTEP_CTL 6 +#define M_TX_LINK_BCST_DCCSTEP_CTL 0x3U +#define V_TX_LINK_BCST_DCCSTEP_CTL(x) ((x) << S_TX_LINK_BCST_DCCSTEP_CTL) +#define G_TX_LINK_BCST_DCCSTEP_CTL(x) (((x) >> 
S_TX_LINK_BCST_DCCSTEP_CTL) & M_TX_LINK_BCST_DCCSTEP_CTL) + +#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_OVERRIDE 0x39a4 +#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_APPLIED 0x39a8 +#define A_T6_MAC_PORT_TX_LINK_BCST_TRANSMIT_DCC_TIME_OUT 0x39ac +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SIGN_OVERRIDE 0x39c0 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_4X_OVERRIDE 0x39c8 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_2X_OVERRIDE 0x39cc +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_SEGMENT_1X_OVERRIDE 0x39d0 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_4X_TERMINATION_OVERRIDE 0x39d8 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_2X_TERMINATION_OVERRIDE 0x39dc +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_TAP_SEGMENT_1X_TERMINATION_OVERRIDE 0x39e0 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_5 0x39ec #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_4 0x39f0 + +#define S_T6_SDOVRD 0 +#define M_T6_SDOVRD 0xffffU +#define V_T6_SDOVRD(x) ((x) << S_T6_SDOVRD) +#define G_T6_SDOVRD(x) (((x) >> S_T6_SDOVRD) & M_T6_SDOVRD) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_3 0x39f4 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_2 0x39f8 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_MACRO_TEST_CONTROL_1 0x39fc + +#define S_T6_SDOVRDEN 15 +#define V_T6_SDOVRDEN(x) ((x) << S_T6_SDOVRDEN) +#define F_T6_SDOVRDEN V_T6_SDOVRDEN(1U) + #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_CONFIGURATION_MODE 0x3a00 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_TEST_CONTROL 0x3a04 #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_CONTROL 0x3a08 #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_OFFSET_CONTROL 0x3a0c + +#define S_T6_TMSCAL 8 +#define M_T6_TMSCAL 0x3U +#define V_T6_TMSCAL(x) ((x) << S_T6_TMSCAL) +#define G_T6_TMSCAL(x) (((x) >> S_T6_TMSCAL) & M_T6_TMSCAL) + +#define S_T6_APADJ 7 +#define V_T6_APADJ(x) ((x) << S_T6_APADJ) +#define F_T6_APADJ V_T6_APADJ(1U) + +#define S_T6_RSEL 6 +#define V_T6_RSEL(x) ((x) << S_T6_RSEL) +#define 
F_T6_RSEL V_T6_RSEL(1U) + +#define S_T6_PHOFFS 0 +#define M_T6_PHOFFS 0x3fU +#define V_T6_PHOFFS(x) ((x) << S_T6_PHOFFS) +#define G_T6_PHOFFS(x) (((x) >> S_T6_PHOFFS) & M_T6_PHOFFS) + #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_1 0x3a10 #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_POSITION_2 0x3a14 #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_1 0x3a18 #define A_MAC_PORT_RX_LINK_BCST_PHASE_ROTATOR_STATIC_PHASE_OFFSET_2 0x3a1c #define A_MAC_PORT_RX_LINK_BCST_DFE_CONTROL 0x3a20 + +#define S_T6_SPIFMT 8 +#define M_T6_SPIFMT 0xfU +#define V_T6_SPIFMT(x) ((x) << S_T6_SPIFMT) +#define G_T6_SPIFMT(x) (((x) >> S_T6_SPIFMT) & M_T6_SPIFMT) + #define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_1 0x3a24 #define A_MAC_PORT_RX_LINK_BCST_DFE_SAMPLE_SNAPSHOT_2 0x3a28 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_1 0x3a2c + +#define S_T6_WRAPSEL 15 +#define V_T6_WRAPSEL(x) ((x) << S_T6_WRAPSEL) +#define F_T6_WRAPSEL V_T6_WRAPSEL(1U) + +#define S_T6_PEAK 9 +#define M_T6_PEAK 0x1fU +#define V_T6_PEAK(x) ((x) << S_T6_PEAK) +#define G_T6_PEAK(x) (((x) >> S_T6_PEAK) & M_T6_PEAK) + #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_2 0x3a30 + +#define S_T6_T5VGAIN 0 +#define M_T6_T5VGAIN 0x7fU +#define V_T6_T5VGAIN(x) ((x) << S_T6_T5VGAIN) +#define G_T6_T5VGAIN(x) (((x) >> S_T6_T5VGAIN) & M_T6_T5VGAIN) + #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_VGA_CONTROL_3 0x3a34 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_1 0x3a38 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_POWER_MANAGEMENT_CONTROL 0x3a38 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_IQAMP_CONTROL_1 0x3a3c #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DQCC_CONTROL_3 0x3a40 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_IQAMP_CONTROL_2 0x3a40 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACAP_AND_DACAN_SELECTION 0x3a44 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACAP_AND_DACAN 0x3a48 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACA_MIN_AND_DACAZ 0x3a4c +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DACA_MIN 0x3a4c 
#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_ADAC_CONTROL 0x3a50 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_AC_COUPLING_CONTROL 0x3a54 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_AC_COUPLING_VALUE 0x3a58 #define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD2_EVN2 0x3a5c +#define A_MAC_PORT_RX_LINK_BCST_DFE_H1H2H3_LOCAL_OFFSET 0x3a5c #define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD3_EVN3 0x3a60 +#define A_MAC_PORT_RX_LINK_BCST_DFE_H1H2H3_LOCAL_OFFSET_VALUE 0x3a60 #define A_MAC_PORT_RX_LINK_BCST_DFE_H1_LOCAL_OFFSET_ODD4_EVN4 0x3a64 +#define A_MAC_PORT_RX_LINK_BCST_PEAKED_INTEGRATOR 0x3a64 +#define A_MAC_PORT_RX_LINK_BCST_CDR_ANALOG_SWITCH 0x3a68 +#define A_MAC_PORT_RX_LINK_BCST_PEAKING_AMPLIFIER_INTIALIZATION_CONTROL 0x3a6c #define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_AMPLITUDE_CENTERING_DAC_AND_DYNAMIC_PEAKING_CONTROL_DPC 0x3a70 #define A_MAC_PORT_RX_LINK_BCST_DYNAMIC_DATA_CENTERING_DDC 0x3a74 + +#define S_T6_ODEC 0 +#define M_T6_ODEC 0xfU +#define V_T6_ODEC(x) ((x) << S_T6_ODEC) +#define G_T6_ODEC(x) (((x) >> S_T6_ODEC) & M_T6_ODEC) + #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS 0x3a78 + +#define S_RX_LINK_BCST_ACCCMP_RIS 11 +#define V_RX_LINK_BCST_ACCCMP_RIS(x) ((x) << S_RX_LINK_BCST_ACCCMP_RIS) +#define F_RX_LINK_BCST_ACCCMP_RIS V_RX_LINK_BCST_ACCCMP_RIS(1U) + #define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_1 0x3a7c #define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_2 0x3a80 #define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_EVN1_EVN2 0x3a84 +#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_CHANNEL 0x3a84 #define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_ODD1_ODD2 0x3a88 +#define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_VALUE 0x3a88 #define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_EVN3_EVN4 0x3a8c +#define A_MAC_PORT_RX_LINK_BCST_H_COEFFICIENBT_BIST 0x3a8c #define A_MAC_PORT_RX_LINK_BCST_DFE_OFFSET_ODD3_ODD4 0x3a90 +#define A_MAC_PORT_RX_LINK_BCST_AC_CAPACITOR_BIST 0x3a90 + +#define S_RX_LINK_BCST_ACCCMP_BIST 13 +#define V_RX_LINK_BCST_ACCCMP_BIST(x) ((x) << 
S_RX_LINK_BCST_ACCCMP_BIST) +#define F_RX_LINK_BCST_ACCCMP_BIST V_RX_LINK_BCST_ACCCMP_BIST(1U) + #define A_MAC_PORT_RX_LINK_BCST_DFE_E0_AND_E1_OFFSET 0x3a94 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_LOFF_CONTROL 0x3a98 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_LOFF_CONTROL_REGISTER 0x3a98 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_SIGDET_CONTROL 0x3a9c #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_ANALOG_CONTROL_SWITCH 0x3aa0 #define A_MAC_PORT_RX_LINK_BCST_INTEGRATOR_DAC_OFFSET 0x3aa4 #define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_CONTROL 0x3aa8 #define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS 0x3aac + +#define S_T6_EMMD 3 +#define M_T6_EMMD 0x3U +#define V_T6_EMMD(x) ((x) << S_T6_EMMD) +#define G_T6_EMMD(x) (((x) >> S_T6_EMMD) & M_T6_EMMD) + +#define S_T6_EMBRDY 2 +#define V_T6_EMBRDY(x) ((x) << S_T6_EMBRDY) +#define F_T6_EMBRDY V_T6_EMBRDY(1U) + +#define S_T6_EMBUMP 1 +#define V_T6_EMBUMP(x) ((x) << S_T6_EMBUMP) +#define F_T6_EMBUMP V_T6_EMBUMP(1U) + #define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_ERROR_COUNT 0x3ab0 #define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PDF_EYE_COUNT 0x3ab4 #define A_MAC_PORT_RX_LINK_BCST_DIGITAL_EYE_METRICS_PATTERN_LENGTH 0x3ab8 +#define A_MAC_PORT_RX_LINK_BCST_DFE_FUNCTION_CONTROL_3 0x3abc #define A_MAC_PORT_RX_LINK_BCST_DFE_TAP_ENABLE 0x3ac0 +#define A_MAC_PORT_RX_LINK_BCST_DFE_TAP_CONTROL 0x3ac0 + +#define S_RX_LINK_BCST_INDEX_DFE_TC 0 +#define M_RX_LINK_BCST_INDEX_DFE_TC 0xfU +#define V_RX_LINK_BCST_INDEX_DFE_TC(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_TC) +#define G_RX_LINK_BCST_INDEX_DFE_TC(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_TC) & M_RX_LINK_BCST_INDEX_DFE_TC) + #define A_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3ac4 +#define A_MAC_PORT_RX_LINK_BCST_DFE_TAP 0x3ac4 + +#define S_RX_LINK_BCST_INDEX_DFE_TAP 0 +#define M_RX_LINK_BCST_INDEX_DFE_TAP 0xfU +#define V_RX_LINK_BCST_INDEX_DFE_TAP(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_TAP) +#define G_RX_LINK_BCST_INDEX_DFE_TAP(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_TAP) & M_RX_LINK_BCST_INDEX_DFE_TAP) + 
#define A_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3ac8 #define A_MAC_PORT_RX_LINK_BCST_DFE_H3 0x3acc #define A_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3ad0 @@ -38397,13 +55930,39 @@ #define A_MAC_PORT_RX_LINK_BCST_DFE_H8_AND_H9 0x3adc #define A_MAC_PORT_RX_LINK_BCST_DFE_H10_AND_H11 0x3ae0 #define A_MAC_PORT_RX_LINK_BCST_DFE_H12 0x3ae4 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_INTERNAL_STATUS_2 0x3ae4 +#define A_MAC_PORT_RX_LINK_BCST_AC_COUPLING_CURRENT_SOURCE_ADJUST 0x3ae8 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DCD_CONTROL 0x3aec +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_DCC_CONTROL 0x3af0 + +#define S_RX_LINK_BCST_DCCSTEP_RXCTL 10 +#define M_RX_LINK_BCST_DCCSTEP_RXCTL 0x3U +#define V_RX_LINK_BCST_DCCSTEP_RXCTL(x) ((x) << S_RX_LINK_BCST_DCCSTEP_RXCTL) +#define G_RX_LINK_BCST_DCCSTEP_RXCTL(x) (((x) >> S_RX_LINK_BCST_DCCSTEP_RXCTL) & M_RX_LINK_BCST_DCCSTEP_RXCTL) + +#define S_RX_LINK_BCST_DCCLOCK_RXCTL 8 +#define V_RX_LINK_BCST_DCCLOCK_RXCTL(x) ((x) << S_RX_LINK_BCST_DCCLOCK_RXCTL) +#define F_RX_LINK_BCST_DCCLOCK_RXCTL V_RX_LINK_BCST_DCCLOCK_RXCTL(1U) + +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_QCC_CONTROL 0x3af4 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_2 0x3af8 +#define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_REGISTER_2 0x3af8 #define A_MAC_PORT_RX_LINK_BCST_RECEIVER_MACRO_TEST_CONTROL_1 0x3afc #define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_0 0x3b00 #define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_1 0x3b04 #define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_2 0x3b08 #define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_3 0x3b0c #define A_MAC_PORT_PLLA_VCO_COARSE_CALIBRATION_4 0x3b10 +#define A_MAC_PORT_PLLA_POWER_CONTROL 0x3b24 + +#define S_SPWRENA 1 +#define V_SPWRENA(x) ((x) << S_SPWRENA) +#define F_SPWRENA V_SPWRENA(1U) + +#define S_NPWRENA 0 +#define V_NPWRENA(x) ((x) << S_NPWRENA) +#define F_NPWRENA V_NPWRENA(1U) + #define A_MAC_PORT_PLLA_CHARGE_PUMP_CONTROL 0x3b28 #define S_T5CPISEL 0 @@ -38411,6 +55970,7 @@ #define V_T5CPISEL(x) ((x) << S_T5CPISEL) #define 
G_T5CPISEL(x) (((x) >> S_T5CPISEL) & M_T5CPISEL) +#define A_MAC_PORT_PLLA_PLL_MICELLANEOUS_CONTROL 0x3b38 #define A_MAC_PORT_PLLA_PCLK_CONTROL 0x3b3c #define S_SPEDIV 3 @@ -38472,6 +56032,15 @@ #define V_VBST(x) ((x) << S_VBST) #define G_VBST(x) (((x) >> S_VBST) & M_VBST) +#define S_PLLDIVA 4 +#define V_PLLDIVA(x) ((x) << S_PLLDIVA) +#define F_PLLDIVA V_PLLDIVA(1U) + +#define S_REFDIV 0 +#define M_REFDIV 0xfU +#define V_REFDIV(x) ((x) << S_REFDIV) +#define G_REFDIV(x) (((x) >> S_REFDIV) & M_REFDIV) + #define A_MAC_PORT_PLLA_MACRO_TEST_CONTROL_3 0x3bf4 #define S_RESYNC 6 @@ -38521,7 +56090,9 @@ #define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_2 0x3c08 #define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_3 0x3c0c #define A_MAC_PORT_PLLB_VCO_COARSE_CALIBRATION_4 0x3c10 +#define A_MAC_PORT_PLLB_POWER_CONTROL 0x3c24 #define A_MAC_PORT_PLLB_CHARGE_PUMP_CONTROL 0x3c28 +#define A_MAC_PORT_PLLB_PLL_MICELLANEOUS_CONTROL 0x3c38 #define A_MAC_PORT_PLLB_PCLK_CONTROL 0x3c3c #define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_CONTROL 0x3c40 #define A_MAC_PORT_PLLB_EYE_METRICS_INTERVAL_LIMIT_1 0x3c44 @@ -38539,6 +56110,7 @@ #define V_STEP(x) ((x) << S_STEP) #define G_STEP(x) (((x) >> S_STEP) & M_STEP) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0 #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8 #define S_C0INIT 0 @@ -38546,6 +56118,16 @@ #define V_C0INIT(x) ((x) << S_C0INIT) #define G_C0INIT(x) (((x) >> S_C0INIT) & M_C0INIT) +#define S_C0PRESET 8 +#define M_C0PRESET 0x7fU +#define V_C0PRESET(x) ((x) << S_C0PRESET) +#define G_C0PRESET(x) (((x) >> S_C0PRESET) & M_C0PRESET) + +#define S_C0INIT1 0 +#define M_C0INIT1 0x7fU +#define V_C0INIT1(x) ((x) << S_C0INIT1) +#define G_C0INIT1(x) (((x) >> S_C0INIT1) & M_C0INIT1) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10 #define S_C0MAX 8 @@ -38558,6 +56140,18 @@ #define V_C0MIN(x) ((x) << S_C0MIN) #define G_C0MIN(x) (((x) >> S_C0MIN) & M_C0MIN) +#define 
A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10 + +#define S_T6_C0MAX 8 +#define M_T6_C0MAX 0x7fU +#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX) +#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX) + +#define S_T6_C0MIN 0 +#define M_T6_C0MIN 0x7fU +#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN) +#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18 #define S_C1INIT 0 @@ -38565,6 +56159,18 @@ #define V_C1INIT(x) ((x) << S_C1INIT) #define G_C1INIT(x) (((x) >> S_C1INIT) & M_C1INIT) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_INIT_EXTENDED 0x18 + +#define S_C1PRESET 8 +#define M_C1PRESET 0x7fU +#define V_C1PRESET(x) ((x) << S_C1PRESET) +#define G_C1PRESET(x) (((x) >> S_C1PRESET) & M_C1PRESET) + +#define S_C1INIT1 0 +#define M_C1INIT1 0x7fU +#define V_C1INIT1(x) ((x) << S_C1INIT1) +#define G_C1INIT1(x) (((x) >> S_C1INIT1) & M_C1INIT1) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20 #define S_C1MAX 8 @@ -38577,6 +56183,7 @@ #define V_C1MIN(x) ((x) << S_C1MIN) #define G_C1MIN(x) (((x) >> S_C1MIN) & M_C1MIN) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20 #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28 #define S_C2INIT 0 @@ -38584,6 +56191,18 @@ #define V_C2INIT(x) ((x) << S_C2INIT) #define G_C2INIT(x) (((x) >> S_C2INIT) & M_C2INIT) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_INIT_EXTENDED 0x28 + +#define S_C2PRESET 8 +#define M_C2PRESET 0x7fU +#define V_C2PRESET(x) ((x) << S_C2PRESET) +#define G_C2PRESET(x) (((x) >> S_C2PRESET) & M_C2PRESET) + +#define S_C2INIT1 0 +#define M_C2INIT1 0x7fU +#define V_C2INIT1(x) ((x) << S_C2INIT1) +#define G_C2INIT1(x) (((x) >> S_C2INIT1) & M_C2INIT1) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30 #define S_C2MAX 8 @@ -38596,6 +56215,18 @@ #define V_C2MIN(x) ((x) << S_C2MIN) #define G_C2MIN(x) (((x) >> S_C2MIN) & M_C2MIN) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30 + 
+#define S_T6_C2MAX 8 +#define M_T6_C2MAX 0x7fU +#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX) +#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX) + +#define S_T6_C2MIN 0 +#define M_T6_C2MIN 0x7fU +#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN) +#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN) + #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38 #define S_VMMAX 0 @@ -38603,6 +56234,7 @@ #define V_VMMAX(x) ((x) << S_VMMAX) #define G_VMMAX(x) (((x) >> S_VMMAX) & M_VMMAX) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38 #define A_MAC_PORT_TX_LINKA_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40 #define S_V2MIN 0 @@ -38610,42 +56242,507 @@ #define V_V2MIN(x) ((x) << S_V2MIN) #define G_V2MIN(x) (((x) >> S_V2MIN) & M_V2MIN) +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_INIT_EXTENDED 0x48 + +#define S_C3PRESET 8 +#define M_C3PRESET 0x7fU +#define V_C3PRESET(x) ((x) << S_C3PRESET) +#define G_C3PRESET(x) (((x) >> S_C3PRESET) & M_C3PRESET) + +#define S_C3INIT1 0 +#define M_C3INIT1 0x7fU +#define V_C3INIT1(x) ((x) << S_C3INIT1) +#define G_C3INIT1(x) (((x) >> S_C3INIT1) & M_C3INIT1) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50 + +#define S_C3MAX 8 +#define M_C3MAX 0x7fU +#define V_C3MAX(x) ((x) << S_C3MAX) +#define G_C3MAX(x) (((x) >> S_C3MAX) & M_C3MAX) + +#define S_C3MIN 0 +#define M_C3MIN 0x7fU +#define V_C3MIN(x) ((x) << S_C3MIN) +#define G_C3MIN(x) (((x) >> S_C3MIN) & M_C3MIN) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c + +#define S_C0INIT2 0 +#define M_C0INIT2 0x7fU +#define V_C0INIT2(x) ((x) << S_C0INIT2) +#define G_C0INIT2(x) (((x) >> S_C0INIT2) & M_C0INIT2) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60 + +#define S_C1INIT2 0 +#define M_C1INIT2 0x7fU +#define V_C1INIT2(x) ((x) << S_C1INIT2) +#define G_C1INIT2(x) (((x) >> S_C1INIT2) & M_C1INIT2) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68 + 
+#define S_C2INIT2 0 +#define M_C2INIT2 0x7fU +#define V_C2INIT2(x) ((x) << S_C2INIT2) +#define G_C2INIT2(x) (((x) >> S_C2INIT2) & M_C2INIT2) + +#define A_MAC_PORT_TX_LINKA_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70 + +#define S_C3INIT2 0 +#define M_C3INIT2 0x7fU +#define V_C3INIT2(x) ((x) << S_C3INIT2) +#define G_C3INIT2(x) (((x) >> S_C3INIT2) & M_C3INIT2) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10 + +#define S_T6_C0MAX 8 +#define M_T6_C0MAX 0x7fU +#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX) +#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX) + +#define S_T6_C0MIN 0 +#define M_T6_C0MIN 0x7fU +#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN) +#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT_EXTENDED 0x18 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT_EXTENDED 0x28 #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30 + +#define S_T6_C2MAX 8 +#define M_T6_C2MAX 0x7fU +#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX) +#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX) + +#define S_T6_C2MIN 0 +#define M_T6_C2MIN 0x7fU +#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN) +#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN) + #define A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38 #define 
A_MAC_PORT_TX_LINKB_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_INIT_EXTENDED 0x48 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68 +#define A_MAC_PORT_TX_LINKB_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10 + +#define S_T6_C0MAX 8 +#define M_T6_C0MAX 0x7fU +#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX) +#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX) + +#define S_T6_C0MIN 0 +#define M_T6_C0MIN 0x7fU +#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN) +#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN) + #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT_EXTENDED 0x18 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT_EXTENDED 0x28 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30 + +#define S_T6_C2MAX 8 +#define M_T6_C2MAX 0x7fU +#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX) +#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX) + +#define S_T6_C2MIN 0 +#define M_T6_C2MIN 0x7fU +#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN) +#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN) + #define 
A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38 #define A_MAC_PORT_TX_LINKC_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_INIT_EXTENDED 0x48 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68 +#define A_MAC_PORT_TX_LINKC_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10 + +#define S_T6_C0MAX 8 +#define M_T6_C0MAX 0x7fU +#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX) +#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX) + +#define S_T6_C0MIN 0 +#define M_T6_C0MIN 0x7fU +#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN) +#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT_EXTENDED 0x18 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT_EXTENDED 0x28 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30 + +#define S_T6_C2MAX 8 +#define M_T6_C2MAX 0x7fU +#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX) +#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX) + +#define S_T6_C2MIN 0 +#define M_T6_C2MIN 0x7fU 
+#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN) +#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN) + #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38 #define A_MAC_PORT_TX_LINKD_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_INIT_EXTENDED 0x48 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68 +#define A_MAC_PORT_TX_LINKD_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_STEP_SIZE_EXTENDED 0x0 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_STEP_SIZE_EXTENDED 0x0 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_INIT_EXTENDED 0x8 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C0_LIMIT_EXTENDED 0x10 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_LIMIT_EXTENDED 0x10 + +#define S_T6_C0MAX 8 +#define M_T6_C0MAX 0x7fU +#define V_T6_C0MAX(x) ((x) << S_T6_C0MAX) +#define G_T6_C0MAX(x) (((x) >> S_T6_C0MAX) & M_T6_C0MAX) + +#define S_T6_C0MIN 0 +#define M_T6_C0MIN 0x7fU +#define V_T6_C0MIN(x) ((x) << S_T6_C0MIN) +#define G_T6_C0MIN(x) (((x) >> S_T6_C0MIN) & M_T6_C0MIN) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_INIT_EXTENDED 0x18 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT_EXTENDED 0x18 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C1_LIMIT_EXTENDED 0x20 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_LIMIT_EXTENDED 0x20 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_INIT_EXTENDED 0x28 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT_EXTENDED 0x28 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_C2_LIMIT_EXTENDED 0x30 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_LIMIT_EXTENDED 0x30 + +#define S_T6_C2MAX 8 +#define 
M_T6_C2MAX 0x7fU +#define V_T6_C2MAX(x) ((x) << S_T6_C2MAX) +#define G_T6_C2MAX(x) (((x) >> S_T6_C2MAX) & M_T6_C2MAX) + +#define S_T6_C2MIN 0 +#define M_T6_C2MIN 0x7fU +#define V_T6_C2MIN(x) ((x) << S_T6_C2MIN) +#define G_T6_C2MIN(x) (((x) >> S_T6_C2MIN) & M_T6_C2MIN) + #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_VM_LIMIT_EXTENDED 0x38 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_VM_LIMIT_EXTENDED 0x38 #define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_802_3AP_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_V2_LIMIT_EXTENDED 0x40 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_INIT_EXTENDED 0x48 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_LIMIT_EXTENDED 0x50 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C0_INIT2_EXTENDED 0x5c +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C1_INIT2_EXTENDED 0x60 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C2_INIT2_EXTENDED 0x68 +#define A_MAC_PORT_TX_LINK_BCST_TRANSMIT_AE_C3_INIT2_EXTENDED 0x70 +#define A_T6_MAC_PORT_RX_LINKA_DFE_TAP_ENABLE 0x2a00 + +#define S_RX_LINKA_INDEX_DFE_EN 1 +#define M_RX_LINKA_INDEX_DFE_EN 0x7fffU +#define V_RX_LINKA_INDEX_DFE_EN(x) ((x) << S_RX_LINKA_INDEX_DFE_EN) +#define G_RX_LINKA_INDEX_DFE_EN(x) (((x) >> S_RX_LINKA_INDEX_DFE_EN) & M_RX_LINKA_INDEX_DFE_EN) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H1 0x2a04 + +#define S_T6_H1OSN 13 +#define M_T6_H1OSN 0x7U +#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN) +#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN) + +#define S_T6_H1OMAG 8 +#define M_T6_H1OMAG 0x1fU +#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG) +#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H2 0x2a08 +#define A_T6_MAC_PORT_RX_LINKA_DFE_H3 0x2a0c +#define A_T6_MAC_PORT_RX_LINKA_DFE_H4 0x2a10 + +#define S_H4SN 4 +#define M_H4SN 0x3U +#define V_H4SN(x) ((x) << S_H4SN) +#define G_H4SN(x) (((x) >> S_H4SN) & M_H4SN) + +#define S_H4MAG 0 +#define M_H4MAG 0xfU +#define V_H4MAG(x) ((x) << S_H4MAG) +#define G_H4MAG(x) (((x) >> S_H4MAG) & 
M_H4MAG) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H5 0x2a14 + +#define S_H5GS 6 +#define M_H5GS 0x3U +#define V_H5GS(x) ((x) << S_H5GS) +#define G_H5GS(x) (((x) >> S_H5GS) & M_H5GS) + +#define S_H5SN 4 +#define M_H5SN 0x3U +#define V_H5SN(x) ((x) << S_H5SN) +#define G_H5SN(x) (((x) >> S_H5SN) & M_H5SN) + +#define S_H5MAG 0 +#define M_H5MAG 0xfU +#define V_H5MAG(x) ((x) << S_H5MAG) +#define G_H5MAG(x) (((x) >> S_H5MAG) & M_H5MAG) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H6_AND_H7 0x2a18 + +#define S_H7SN 12 +#define M_H7SN 0x3U +#define V_H7SN(x) ((x) << S_H7SN) +#define G_H7SN(x) (((x) >> S_H7SN) & M_H7SN) + +#define S_H6SN 4 +#define M_H6SN 0x3U +#define V_H6SN(x) ((x) << S_H6SN) +#define G_H6SN(x) (((x) >> S_H6SN) & M_H6SN) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H8_AND_H9 0x2a1c + +#define S_H9SN 12 +#define M_H9SN 0x3U +#define V_H9SN(x) ((x) << S_H9SN) +#define G_H9SN(x) (((x) >> S_H9SN) & M_H9SN) + +#define S_H8SN 4 +#define M_H8SN 0x3U +#define V_H8SN(x) ((x) << S_H8SN) +#define G_H8SN(x) (((x) >> S_H8SN) & M_H8SN) + +#define A_T6_MAC_PORT_RX_LINKA_DFE_H10_AND_H11 0x2a20 + +#define S_H11SN 12 +#define M_H11SN 0x3U +#define V_H11SN(x) ((x) << S_H11SN) +#define G_H11SN(x) (((x) >> S_H11SN) & M_H11SN) + +#define S_H10SN 4 +#define M_H10SN 0x3U +#define V_H10SN(x) ((x) << S_H10SN) +#define G_H10SN(x) (((x) >> S_H10SN) & M_H10SN) + +#define A_MAC_PORT_RX_LINKA_DFE_H12_13 0x2a24 + +#define S_H13GS 13 +#define M_H13GS 0x7U +#define V_H13GS(x) ((x) << S_H13GS) +#define G_H13GS(x) (((x) >> S_H13GS) & M_H13GS) + +#define S_H13SN 10 +#define M_H13SN 0x7U +#define V_H13SN(x) ((x) << S_H13SN) +#define G_H13SN(x) (((x) >> S_H13SN) & M_H13SN) + +#define S_H13MAG 8 +#define M_H13MAG 0x3U +#define V_H13MAG(x) ((x) << S_H13MAG) +#define G_H13MAG(x) (((x) >> S_H13MAG) & M_H13MAG) + +#define S_H12SN 4 +#define M_H12SN 0x3U +#define V_H12SN(x) ((x) << S_H12SN) +#define G_H12SN(x) (((x) >> S_H12SN) & M_H12SN) + +#define A_MAC_PORT_RX_LINKA_DFE_H14_15 0x2a28 + +#define S_H15GS 13 +#define 
M_H15GS 0x7U +#define V_H15GS(x) ((x) << S_H15GS) +#define G_H15GS(x) (((x) >> S_H15GS) & M_H15GS) + +#define S_H15SN 10 +#define M_H15SN 0x7U +#define V_H15SN(x) ((x) << S_H15SN) +#define G_H15SN(x) (((x) >> S_H15SN) & M_H15SN) + +#define S_H15MAG 8 +#define M_H15MAG 0x3U +#define V_H15MAG(x) ((x) << S_H15MAG) +#define G_H15MAG(x) (((x) >> S_H15MAG) & M_H15MAG) + +#define S_H14GS 6 +#define M_H14GS 0x3U +#define V_H14GS(x) ((x) << S_H14GS) +#define G_H14GS(x) (((x) >> S_H14GS) & M_H14GS) + +#define S_H14SN 4 +#define M_H14SN 0x3U +#define V_H14SN(x) ((x) << S_H14SN) +#define G_H14SN(x) (((x) >> S_H14SN) & M_H14SN) + +#define S_H14MAG 0 +#define M_H14MAG 0xfU +#define V_H14MAG(x) ((x) << S_H14MAG) +#define G_H14MAG(x) (((x) >> S_H14MAG) & M_H14MAG) + +#define A_MAC_PORT_RX_LINKA_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2a2c + +#define S_H1ODELTA 8 +#define M_H1ODELTA 0x1fU +#define V_H1ODELTA(x) ((x) << S_H1ODELTA) +#define G_H1ODELTA(x) (((x) >> S_H1ODELTA) & M_H1ODELTA) + +#define S_H1EDELTA 0 +#define M_H1EDELTA 0x3fU +#define V_H1EDELTA(x) ((x) << S_H1EDELTA) +#define G_H1EDELTA(x) (((x) >> S_H1EDELTA) & M_H1EDELTA) + +#define A_T6_MAC_PORT_RX_LINKB_DFE_TAP_ENABLE 0x2b00 + +#define S_RX_LINKB_INDEX_DFE_EN 1 +#define M_RX_LINKB_INDEX_DFE_EN 0x7fffU +#define V_RX_LINKB_INDEX_DFE_EN(x) ((x) << S_RX_LINKB_INDEX_DFE_EN) +#define G_RX_LINKB_INDEX_DFE_EN(x) (((x) >> S_RX_LINKB_INDEX_DFE_EN) & M_RX_LINKB_INDEX_DFE_EN) + +#define A_T6_MAC_PORT_RX_LINKB_DFE_H1 0x2b04 + +#define S_T6_H1OSN 13 +#define M_T6_H1OSN 0x7U +#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN) +#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN) + +#define S_T6_H1OMAG 8 +#define M_T6_H1OMAG 0x1fU +#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG) +#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG) + +#define A_T6_MAC_PORT_RX_LINKB_DFE_H2 0x2b08 +#define A_T6_MAC_PORT_RX_LINKB_DFE_H3 0x2b0c +#define A_T6_MAC_PORT_RX_LINKB_DFE_H4 0x2b10 +#define A_T6_MAC_PORT_RX_LINKB_DFE_H5 0x2b14 +#define 
A_T6_MAC_PORT_RX_LINKB_DFE_H6_AND_H7 0x2b18 +#define A_T6_MAC_PORT_RX_LINKB_DFE_H8_AND_H9 0x2b1c +#define A_T6_MAC_PORT_RX_LINKB_DFE_H10_AND_H11 0x2b20 +#define A_MAC_PORT_RX_LINKB_DFE_H12_13 0x2b24 +#define A_MAC_PORT_RX_LINKB_DFE_H14_15 0x2b28 +#define A_MAC_PORT_RX_LINKB_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2b2c +#define A_T6_MAC_PORT_RX_LINKC_DFE_TAP_ENABLE 0x2e00 + +#define S_RX_LINKC_INDEX_DFE_EN 1 +#define M_RX_LINKC_INDEX_DFE_EN 0x7fffU +#define V_RX_LINKC_INDEX_DFE_EN(x) ((x) << S_RX_LINKC_INDEX_DFE_EN) +#define G_RX_LINKC_INDEX_DFE_EN(x) (((x) >> S_RX_LINKC_INDEX_DFE_EN) & M_RX_LINKC_INDEX_DFE_EN) + +#define A_T6_MAC_PORT_RX_LINKC_DFE_H1 0x2e04 + +#define S_T6_H1OSN 13 +#define M_T6_H1OSN 0x7U +#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN) +#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN) + +#define S_T6_H1OMAG 8 +#define M_T6_H1OMAG 0x1fU +#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG) +#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG) + +#define A_T6_MAC_PORT_RX_LINKC_DFE_H2 0x2e08 +#define A_T6_MAC_PORT_RX_LINKC_DFE_H3 0x2e0c +#define A_T6_MAC_PORT_RX_LINKC_DFE_H4 0x2e10 +#define A_T6_MAC_PORT_RX_LINKC_DFE_H5 0x2e14 +#define A_T6_MAC_PORT_RX_LINKC_DFE_H6_AND_H7 0x2e18 +#define A_T6_MAC_PORT_RX_LINKC_DFE_H8_AND_H9 0x2e1c +#define A_T6_MAC_PORT_RX_LINKC_DFE_H10_AND_H11 0x2e20 +#define A_MAC_PORT_RX_LINKC_DFE_H12_13 0x2e24 +#define A_MAC_PORT_RX_LINKC_DFE_H14_15 0x2e28 +#define A_MAC_PORT_RX_LINKC_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2e2c +#define A_T6_MAC_PORT_RX_LINKD_DFE_TAP_ENABLE 0x2f00 + +#define S_RX_LINKD_INDEX_DFE_EN 1 +#define M_RX_LINKD_INDEX_DFE_EN 0x7fffU +#define V_RX_LINKD_INDEX_DFE_EN(x) ((x) << S_RX_LINKD_INDEX_DFE_EN) +#define G_RX_LINKD_INDEX_DFE_EN(x) (((x) >> S_RX_LINKD_INDEX_DFE_EN) & M_RX_LINKD_INDEX_DFE_EN) + +#define A_T6_MAC_PORT_RX_LINKD_DFE_H1 0x2f04 + +#define S_T6_H1OSN 13 +#define M_T6_H1OSN 0x7U +#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN) +#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN) + +#define S_T6_H1OMAG 
8 +#define M_T6_H1OMAG 0x1fU +#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG) +#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG) + +#define A_T6_MAC_PORT_RX_LINKD_DFE_H2 0x2f08 +#define A_T6_MAC_PORT_RX_LINKD_DFE_H3 0x2f0c +#define A_T6_MAC_PORT_RX_LINKD_DFE_H4 0x2f10 +#define A_T6_MAC_PORT_RX_LINKD_DFE_H5 0x2f14 +#define A_T6_MAC_PORT_RX_LINKD_DFE_H6_AND_H7 0x2f18 +#define A_T6_MAC_PORT_RX_LINKD_DFE_H8_AND_H9 0x2f1c +#define A_T6_MAC_PORT_RX_LINKD_DFE_H10_AND_H11 0x2f20 +#define A_MAC_PORT_RX_LINKD_DFE_H12_13 0x2f24 +#define A_MAC_PORT_RX_LINKD_DFE_H14_15 0x2f28 +#define A_MAC_PORT_RX_LINKD_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x2f2c +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_TAP_ENABLE 0x3200 + +#define S_RX_LINK_BCST_INDEX_DFE_EN 1 +#define M_RX_LINK_BCST_INDEX_DFE_EN 0x7fffU +#define V_RX_LINK_BCST_INDEX_DFE_EN(x) ((x) << S_RX_LINK_BCST_INDEX_DFE_EN) +#define G_RX_LINK_BCST_INDEX_DFE_EN(x) (((x) >> S_RX_LINK_BCST_INDEX_DFE_EN) & M_RX_LINK_BCST_INDEX_DFE_EN) + +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H1 0x3204 + +#define S_T6_H1OSN 13 +#define M_T6_H1OSN 0x7U +#define V_T6_H1OSN(x) ((x) << S_T6_H1OSN) +#define G_T6_H1OSN(x) (((x) >> S_T6_H1OSN) & M_T6_H1OSN) + +#define S_T6_H1OMAG 8 +#define M_T6_H1OMAG 0x1fU +#define V_T6_H1OMAG(x) ((x) << S_T6_H1OMAG) +#define G_T6_H1OMAG(x) (((x) >> S_T6_H1OMAG) & M_T6_H1OMAG) + +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H2 0x3208 +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H3 0x320c +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H4 0x3210 +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H5 0x3214 +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H6_AND_H7 0x3218 +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H8_AND_H9 0x321c +#define A_T6_MAC_PORT_RX_LINK_BCST_DFE_H10_AND_H11 0x3220 +#define A_MAC_PORT_RX_LINK_BCST_DFE_H12_13 0x3224 +#define A_MAC_PORT_RX_LINK_BCST_DFE_H14_15 0x3228 +#define A_MAC_PORT_RX_LINK_BCST_DFE_H1ODD_DELTA_AND_H1EVEN_DELTA 0x322c /* registers for module MC_0 */ #define MC_0_BASE_ADDR 0x40000 @@ -38686,6 +56783,28 @@ #define V_CMD_OPCODE0(x) 
((x) << S_CMD_OPCODE0) #define G_CMD_OPCODE0(x) (((x) >> S_CMD_OPCODE0) & M_CMD_OPCODE0) +#define A_MC_LMC_MCSTAT 0x40040 + +#define S_INIT_COMPLETE 31 +#define V_INIT_COMPLETE(x) ((x) << S_INIT_COMPLETE) +#define F_INIT_COMPLETE V_INIT_COMPLETE(1U) + +#define S_SELF_REF_MODE 30 +#define V_SELF_REF_MODE(x) ((x) << S_SELF_REF_MODE) +#define F_SELF_REF_MODE V_SELF_REF_MODE(1U) + +#define S_IDLE 29 +#define V_IDLE(x) ((x) << S_IDLE) +#define F_IDLE V_IDLE(1U) + +#define S_T6_DFI_INIT_COMPLETE 28 +#define V_T6_DFI_INIT_COMPLETE(x) ((x) << S_T6_DFI_INIT_COMPLETE) +#define F_T6_DFI_INIT_COMPLETE V_T6_DFI_INIT_COMPLETE(1U) + +#define S_PREFILL_COMPLETE 27 +#define V_PREFILL_COMPLETE(x) ((x) << S_PREFILL_COMPLETE) +#define F_PREFILL_COMPLETE V_PREFILL_COMPLETE(1U) + #define A_MC_UPCTL_POWCTL 0x40044 #define A_MC_UPCTL_POWSTAT 0x40048 #define A_MC_UPCTL_CMDTSTAT 0x4004c @@ -38796,7 +56915,163 @@ #define V_CKE_OR_EN(x) ((x) << S_CKE_OR_EN) #define F_CKE_OR_EN V_CKE_OR_EN(1U) +#define A_MC_LMC_MCOPT1 0x40080 + +#define S_MC_PROTOCOL 31 +#define V_MC_PROTOCOL(x) ((x) << S_MC_PROTOCOL) +#define F_MC_PROTOCOL V_MC_PROTOCOL(1U) + +#define S_DM_ENABLE 30 +#define V_DM_ENABLE(x) ((x) << S_DM_ENABLE) +#define F_DM_ENABLE V_DM_ENABLE(1U) + +#define S_T6_ECC_EN 29 +#define V_T6_ECC_EN(x) ((x) << S_T6_ECC_EN) +#define F_T6_ECC_EN V_T6_ECC_EN(1U) + +#define S_ECC_COR 28 +#define V_ECC_COR(x) ((x) << S_ECC_COR) +#define F_ECC_COR V_ECC_COR(1U) + +#define S_RDIMM 27 +#define V_RDIMM(x) ((x) << S_RDIMM) +#define F_RDIMM V_RDIMM(1U) + +#define S_PMUM 25 +#define M_PMUM 0x3U +#define V_PMUM(x) ((x) << S_PMUM) +#define G_PMUM(x) (((x) >> S_PMUM) & M_PMUM) + +#define S_WIDTH0 24 +#define V_WIDTH0(x) ((x) << S_WIDTH0) +#define F_WIDTH0 V_WIDTH0(1U) + +#define S_PORT_ID_CHK_EN 23 +#define V_PORT_ID_CHK_EN(x) ((x) << S_PORT_ID_CHK_EN) +#define F_PORT_ID_CHK_EN V_PORT_ID_CHK_EN(1U) + +#define S_UIOS 22 +#define V_UIOS(x) ((x) << S_UIOS) +#define F_UIOS V_UIOS(1U) + +#define S_QUADCS_RDIMM 21 
+#define V_QUADCS_RDIMM(x) ((x) << S_QUADCS_RDIMM) +#define F_QUADCS_RDIMM V_QUADCS_RDIMM(1U) + +#define S_ZQCL_EN 20 +#define V_ZQCL_EN(x) ((x) << S_ZQCL_EN) +#define F_ZQCL_EN V_ZQCL_EN(1U) + +#define S_WIDTH1 19 +#define V_WIDTH1(x) ((x) << S_WIDTH1) +#define F_WIDTH1 V_WIDTH1(1U) + +#define S_WD_DLY 18 +#define V_WD_DLY(x) ((x) << S_WD_DLY) +#define F_WD_DLY V_WD_DLY(1U) + +#define S_QDEPTH 16 +#define M_QDEPTH 0x3U +#define V_QDEPTH(x) ((x) << S_QDEPTH) +#define G_QDEPTH(x) (((x) >> S_QDEPTH) & M_QDEPTH) + +#define S_RWOO 15 +#define V_RWOO(x) ((x) << S_RWOO) +#define F_RWOO V_RWOO(1U) + +#define S_WOOO 14 +#define V_WOOO(x) ((x) << S_WOOO) +#define F_WOOO V_WOOO(1U) + +#define S_DCOO 13 +#define V_DCOO(x) ((x) << S_DCOO) +#define F_DCOO V_DCOO(1U) + +#define S_DEF_REF 12 +#define V_DEF_REF(x) ((x) << S_DEF_REF) +#define F_DEF_REF V_DEF_REF(1U) + +#define S_DEV_TYPE 11 +#define V_DEV_TYPE(x) ((x) << S_DEV_TYPE) +#define F_DEV_TYPE V_DEV_TYPE(1U) + +#define S_CA_PTY_DLY 10 +#define V_CA_PTY_DLY(x) ((x) << S_CA_PTY_DLY) +#define F_CA_PTY_DLY V_CA_PTY_DLY(1U) + +#define S_ECC_MUX 8 +#define M_ECC_MUX 0x3U +#define V_ECC_MUX(x) ((x) << S_ECC_MUX) +#define G_ECC_MUX(x) (((x) >> S_ECC_MUX) & M_ECC_MUX) + +#define S_CE_THRESHOLD 0 +#define M_CE_THRESHOLD 0xffU +#define V_CE_THRESHOLD(x) ((x) << S_CE_THRESHOLD) +#define G_CE_THRESHOLD(x) (((x) >> S_CE_THRESHOLD) & M_CE_THRESHOLD) + #define A_MC_UPCTL_PPCFG 0x40084 +#define A_MC_LMC_MCOPT2 0x40084 + +#define S_SELF_REF_EN 31 +#define V_SELF_REF_EN(x) ((x) << S_SELF_REF_EN) +#define F_SELF_REF_EN V_SELF_REF_EN(1U) + +#define S_XSR_PREVENT 30 +#define V_XSR_PREVENT(x) ((x) << S_XSR_PREVENT) +#define F_XSR_PREVENT V_XSR_PREVENT(1U) + +#define S_INIT_START 29 +#define V_INIT_START(x) ((x) << S_INIT_START) +#define F_INIT_START V_INIT_START(1U) + +#define S_MC_ENABLE 28 +#define V_MC_ENABLE(x) ((x) << S_MC_ENABLE) +#define F_MC_ENABLE V_MC_ENABLE(1U) + +#define S_CLK_DISABLE 24 +#define M_CLK_DISABLE 0xfU +#define 
V_CLK_DISABLE(x) ((x) << S_CLK_DISABLE) +#define G_CLK_DISABLE(x) (((x) >> S_CLK_DISABLE) & M_CLK_DISABLE) + +#define S_RESET_RANK 20 +#define M_RESET_RANK 0xfU +#define V_RESET_RANK(x) ((x) << S_RESET_RANK) +#define G_RESET_RANK(x) (((x) >> S_RESET_RANK) & M_RESET_RANK) + +#define S_MCIF_COMP_PTY_EN 19 +#define V_MCIF_COMP_PTY_EN(x) ((x) << S_MCIF_COMP_PTY_EN) +#define F_MCIF_COMP_PTY_EN V_MCIF_COMP_PTY_EN(1U) + +#define S_CKE_OE 17 +#define V_CKE_OE(x) ((x) << S_CKE_OE) +#define F_CKE_OE V_CKE_OE(1U) + +#define S_RESET_OE 16 +#define V_RESET_OE(x) ((x) << S_RESET_OE) +#define F_RESET_OE V_RESET_OE(1U) + +#define S_DFI_PHYUD_CNTL 14 +#define V_DFI_PHYUD_CNTL(x) ((x) << S_DFI_PHYUD_CNTL) +#define F_DFI_PHYUD_CNTL V_DFI_PHYUD_CNTL(1U) + +#define S_DFI_PHYUD_ACK 13 +#define V_DFI_PHYUD_ACK(x) ((x) << S_DFI_PHYUD_ACK) +#define F_DFI_PHYUD_ACK V_DFI_PHYUD_ACK(1U) + +#define S_T6_DFI_INIT_START 12 +#define V_T6_DFI_INIT_START(x) ((x) << S_T6_DFI_INIT_START) +#define F_T6_DFI_INIT_START V_T6_DFI_INIT_START(1U) + +#define S_PM_ENABLE 8 +#define M_PM_ENABLE 0xfU +#define V_PM_ENABLE(x) ((x) << S_PM_ENABLE) +#define G_PM_ENABLE(x) (((x) >> S_PM_ENABLE) & M_PM_ENABLE) + +#define S_RD_DEFREF_CNT 4 +#define M_RD_DEFREF_CNT 0xfU +#define V_RD_DEFREF_CNT(x) ((x) << S_RD_DEFREF_CNT) +#define G_RD_DEFREF_CNT(x) (((x) >> S_RD_DEFREF_CNT) & M_RD_DEFREF_CNT) + #define A_MC_UPCTL_MSTAT 0x40088 #define S_SELF_REFRESH 2 @@ -38883,6 +57158,26 @@ #define V_T_RTP0(x) ((x) << S_T_RTP0) #define G_T_RTP0(x) (((x) >> S_T_RTP0) & M_T_RTP0) +#define A_MC_LMC_CFGR0 0x40100 + +#define S_ROW_WIDTH 12 +#define M_ROW_WIDTH 0x7U +#define V_ROW_WIDTH(x) ((x) << S_ROW_WIDTH) +#define G_ROW_WIDTH(x) (((x) >> S_ROW_WIDTH) & M_ROW_WIDTH) + +#define S_ADDR_MODE 8 +#define M_ADDR_MODE 0xfU +#define V_ADDR_MODE(x) ((x) << S_ADDR_MODE) +#define G_ADDR_MODE(x) (((x) >> S_ADDR_MODE) & M_ADDR_MODE) + +#define S_MIRROR 4 +#define V_MIRROR(x) ((x) << S_MIRROR) +#define F_MIRROR V_MIRROR(1U) + +#define S_RANK_ENABLE 
0 +#define V_RANK_ENABLE(x) ((x) << S_RANK_ENABLE) +#define F_RANK_ENABLE V_RANK_ENABLE(1U) + #define A_MC_UPCTL_TWR 0x40104 #define S_U_T_WR 0 @@ -38947,6 +57242,26 @@ #define V_T_CKESR(x) ((x) << S_T_CKESR) #define G_T_CKESR(x) (((x) >> S_T_CKESR) & M_T_CKESR) +#define A_MC_LMC_INITSEQ0 0x40140 + +#define S_INIT_ENABLE 31 +#define V_INIT_ENABLE(x) ((x) << S_INIT_ENABLE) +#define F_INIT_ENABLE V_INIT_ENABLE(1U) + +#define S_WAIT 16 +#define M_WAIT 0xfffU +#define CXGBE_V_WAIT(x) ((x) << S_WAIT) +#define G_WAIT(x) (((x) >> S_WAIT) & M_WAIT) + +#define S_EN_MULTI_RANK_SEL 4 +#define V_EN_MULTI_RANK_SEL(x) ((x) << S_EN_MULTI_RANK_SEL) +#define F_EN_MULTI_RANK_SEL V_EN_MULTI_RANK_SEL(1U) + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + #define A_MC_UPCTL_TDPD 0x40144 #define S_T_DPD 0 @@ -38954,7 +57269,90 @@ #define V_T_DPD(x) ((x) << S_T_DPD) #define G_T_DPD(x) (((x) >> S_T_DPD) & M_T_DPD) +#define A_MC_LMC_CMD0 0x40144 + +#define S_CMD 29 +#define M_CMD 0x7U +#define V_CMD(x) ((x) << S_CMD) +#define G_CMD(x) (((x) >> S_CMD) & M_CMD) + +#define S_CMD_ACTN 28 +#define V_CMD_ACTN(x) ((x) << S_CMD_ACTN) +#define F_CMD_ACTN V_CMD_ACTN(1U) + +#define S_BG1 23 +#define V_BG1(x) ((x) << S_BG1) +#define F_BG1 V_BG1(1U) + +#define S_BANK 20 +#define M_BANK 0x7U +#define V_BANK(x) ((x) << S_BANK) +#define G_BANK(x) (((x) >> S_BANK) & M_BANK) + +#define A_MC_LMC_INITSEQ1 0x40148 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD1 0x4014c +#define A_MC_LMC_INITSEQ2 0x40150 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD2 0x40154 +#define A_MC_LMC_INITSEQ3 0x40158 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) 
+#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD3 0x4015c +#define A_MC_LMC_INITSEQ4 0x40160 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD4 0x40164 +#define A_MC_LMC_INITSEQ5 0x40168 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD5 0x4016c +#define A_MC_LMC_INITSEQ6 0x40170 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD6 0x40174 +#define A_MC_LMC_INITSEQ7 0x40178 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD7 0x4017c #define A_MC_UPCTL_ECCCFG 0x40180 +#define A_MC_LMC_INITSEQ8 0x40180 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + #define A_MC_UPCTL_ECCTST 0x40184 #define S_ECC_TEST_MASK0 0 @@ -38962,8 +57360,65 @@ #define V_ECC_TEST_MASK0(x) ((x) << S_ECC_TEST_MASK0) #define G_ECC_TEST_MASK0(x) (((x) >> S_ECC_TEST_MASK0) & M_ECC_TEST_MASK0) +#define A_MC_LMC_CMD8 0x40184 #define A_MC_UPCTL_ECCCLR 0x40188 +#define A_MC_LMC_INITSEQ9 0x40188 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + #define A_MC_UPCTL_ECCLOG 0x4018c +#define A_MC_LMC_CMD9 0x4018c +#define A_MC_LMC_INITSEQ10 0x40190 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD10 0x40194 +#define A_MC_LMC_INITSEQ11 0x40198 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << 
S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD11 0x4019c +#define A_MC_LMC_INITSEQ12 0x401a0 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD12 0x401a4 +#define A_MC_LMC_INITSEQ13 0x401a8 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD13 0x401ac +#define A_MC_LMC_INITSEQ14 0x401b0 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD14 0x401b4 +#define A_MC_LMC_INITSEQ15 0x401b8 + +#define S_T6_RANK 0 +#define M_T6_RANK 0xfU +#define V_T6_RANK(x) ((x) << S_T6_RANK) +#define G_T6_RANK(x) (((x) >> S_T6_RANK) & M_T6_RANK) + +#define A_MC_LMC_CMD15 0x401bc #define A_MC_UPCTL_DTUWACTL 0x40200 #define S_DTU_WR_ROW0 13 @@ -38971,6 +57426,18 @@ #define V_DTU_WR_ROW0(x) ((x) << S_DTU_WR_ROW0) #define G_DTU_WR_ROW0(x) (((x) >> S_DTU_WR_ROW0) & M_DTU_WR_ROW0) +#define A_MC_LMC_SDTR0 0x40200 + +#define S_REFI 16 +#define M_REFI 0xffffU +#define V_REFI(x) ((x) << S_REFI) +#define G_REFI(x) (((x) >> S_REFI) & M_REFI) + +#define S_T_RFC_XPR 0 +#define M_T_RFC_XPR 0xfffU +#define V_T_RFC_XPR(x) ((x) << S_T_RFC_XPR) +#define G_T_RFC_XPR(x) (((x) >> S_T_RFC_XPR) & M_T_RFC_XPR) + #define A_MC_UPCTL_DTURACTL 0x40204 #define S_DTU_RD_ROW0 13 @@ -38978,15 +57445,177 @@ #define V_DTU_RD_ROW0(x) ((x) << S_DTU_RD_ROW0) #define G_DTU_RD_ROW0(x) (((x) >> S_DTU_RD_ROW0) & M_DTU_RD_ROW0) +#define A_MC_LMC_SDTR1 0x40204 + +#define S_T_LEADOFF 31 +#define V_T_LEADOFF(x) ((x) << S_T_LEADOFF) +#define F_T_LEADOFF V_T_LEADOFF(1U) + +#define S_ODT_DELAY 30 +#define V_ODT_DELAY(x) ((x) << S_ODT_DELAY) +#define F_ODT_DELAY V_ODT_DELAY(1U) + +#define S_ODT_WIDTH 29 +#define V_ODT_WIDTH(x) ((x) << S_ODT_WIDTH) 
+#define F_ODT_WIDTH V_ODT_WIDTH(1U) + +#define S_T_WTRO 24 +#define M_T_WTRO 0xfU +#define V_T_WTRO(x) ((x) << S_T_WTRO) +#define G_T_WTRO(x) (((x) >> S_T_WTRO) & M_T_WTRO) + +#define S_T_RTWO 16 +#define M_T_RTWO 0xfU +#define V_T_RTWO(x) ((x) << S_T_RTWO) +#define G_T_RTWO(x) (((x) >> S_T_RTWO) & M_T_RTWO) + +#define S_T_RTW_ADJ 12 +#define M_T_RTW_ADJ 0xfU +#define V_T_RTW_ADJ(x) ((x) << S_T_RTW_ADJ) +#define G_T_RTW_ADJ(x) (((x) >> S_T_RTW_ADJ) & M_T_RTW_ADJ) + +#define S_T_WTWO 8 +#define M_T_WTWO 0xfU +#define V_T_WTWO(x) ((x) << S_T_WTWO) +#define G_T_WTWO(x) (((x) >> S_T_WTWO) & M_T_WTWO) + +#define S_T_RTRO 0 +#define M_T_RTRO 0xfU +#define V_T_RTRO(x) ((x) << S_T_RTRO) +#define G_T_RTRO(x) (((x) >> S_T_RTRO) & M_T_RTRO) + #define A_MC_UPCTL_DTUCFG 0x40208 +#define A_MC_LMC_SDTR2 0x40208 + +#define S_T6_T_CWL 28 +#define M_T6_T_CWL 0xfU +#define V_T6_T_CWL(x) ((x) << S_T6_T_CWL) +#define G_T6_T_CWL(x) (((x) >> S_T6_T_CWL) & M_T6_T_CWL) + +#define S_T_RCD0 24 +#define M_T_RCD0 0xfU +#define V_T_RCD0(x) ((x) << S_T_RCD0) +#define G_T_RCD0(x) (((x) >> S_T_RCD0) & M_T_RCD0) + +#define S_T_PL 20 +#define M_T_PL 0xfU +#define V_T_PL(x) ((x) << S_T_PL) +#define G_T_PL(x) (((x) >> S_T_PL) & M_T_PL) + +#define S_T_RP0 16 +#define M_T_RP0 0xfU +#define V_T_RP0(x) ((x) << S_T_RP0) +#define G_T_RP0(x) (((x) >> S_T_RP0) & M_T_RP0) + +#define S_T_RP1 15 +#define V_T_RP1(x) ((x) << S_T_RP1) +#define F_T_RP1 V_T_RP1(1U) + +#define S_T_RCD1 14 +#define V_T_RCD1(x) ((x) << S_T_RCD1) +#define F_T_RCD1 V_T_RCD1(1U) + +#define S_T6_T_RC 8 +#define M_T6_T_RC 0x3fU +#define V_T6_T_RC(x) ((x) << S_T6_T_RC) +#define G_T6_T_RC(x) (((x) >> S_T6_T_RC) & M_T6_T_RC) + #define A_MC_UPCTL_DTUECTL 0x4020c +#define A_MC_LMC_SDTR3 0x4020c + +#define S_T_WTR_S 28 +#define M_T_WTR_S 0xfU +#define V_T_WTR_S(x) ((x) << S_T_WTR_S) +#define G_T_WTR_S(x) (((x) >> S_T_WTR_S) & M_T_WTR_S) + +#define S_T6_T_WTR 24 +#define M_T6_T_WTR 0xfU +#define V_T6_T_WTR(x) ((x) << S_T6_T_WTR) +#define 
G_T6_T_WTR(x) (((x) >> S_T6_T_WTR) & M_T6_T_WTR) + +#define S_FAW_ADJ 20 +#define M_FAW_ADJ 0x3U +#define V_FAW_ADJ(x) ((x) << S_FAW_ADJ) +#define G_FAW_ADJ(x) (((x) >> S_FAW_ADJ) & M_FAW_ADJ) + +#define S_T6_T_RTP 16 +#define M_T6_T_RTP 0xfU +#define V_T6_T_RTP(x) ((x) << S_T6_T_RTP) +#define G_T6_T_RTP(x) (((x) >> S_T6_T_RTP) & M_T6_T_RTP) + +#define S_T_RRD_L 12 +#define M_T_RRD_L 0xfU +#define V_T_RRD_L(x) ((x) << S_T_RRD_L) +#define G_T_RRD_L(x) (((x) >> S_T_RRD_L) & M_T_RRD_L) + +#define S_T6_T_RRD 8 +#define M_T6_T_RRD 0xfU +#define V_T6_T_RRD(x) ((x) << S_T6_T_RRD) +#define G_T6_T_RRD(x) (((x) >> S_T6_T_RRD) & M_T6_T_RRD) + +#define S_T_XSDLL 0 +#define M_T_XSDLL 0xffU +#define V_T_XSDLL(x) ((x) << S_T_XSDLL) +#define G_T_XSDLL(x) (((x) >> S_T_XSDLL) & M_T_XSDLL) + #define A_MC_UPCTL_DTUWD0 0x40210 +#define A_MC_LMC_SDTR4 0x40210 + +#define S_T_RDDATA_EN 24 +#define M_T_RDDATA_EN 0x7fU +#define V_T_RDDATA_EN(x) ((x) << S_T_RDDATA_EN) +#define G_T_RDDATA_EN(x) (((x) >> S_T_RDDATA_EN) & M_T_RDDATA_EN) + +#define S_T_SYS_RDLAT 16 +#define M_T_SYS_RDLAT 0x3fU +#define V_T_SYS_RDLAT(x) ((x) << S_T_SYS_RDLAT) +#define G_T_SYS_RDLAT(x) (((x) >> S_T_SYS_RDLAT) & M_T_SYS_RDLAT) + +#define S_T_CCD_L 12 +#define M_T_CCD_L 0xfU +#define V_T_CCD_L(x) ((x) << S_T_CCD_L) +#define G_T_CCD_L(x) (((x) >> S_T_CCD_L) & M_T_CCD_L) + +#define S_T_CCD 8 +#define M_T_CCD 0x7U +#define V_T_CCD(x) ((x) << S_T_CCD) +#define G_T_CCD(x) (((x) >> S_T_CCD) & M_T_CCD) + +#define S_T_CPDED 5 +#define M_T_CPDED 0x7U +#define V_T_CPDED(x) ((x) << S_T_CPDED) +#define G_T_CPDED(x) (((x) >> S_T_CPDED) & M_T_CPDED) + +#define S_T6_T_MOD 0 +#define M_T6_T_MOD 0x1fU +#define V_T6_T_MOD(x) ((x) << S_T6_T_MOD) +#define G_T6_T_MOD(x) (((x) >> S_T6_T_MOD) & M_T6_T_MOD) + #define A_MC_UPCTL_DTUWD1 0x40214 +#define A_MC_LMC_SDTR5 0x40214 + +#define S_T_PHY_WRDATA 24 +#define M_T_PHY_WRDATA 0x7U +#define V_T_PHY_WRDATA(x) ((x) << S_T_PHY_WRDATA) +#define G_T_PHY_WRDATA(x) (((x) >> S_T_PHY_WRDATA) & 
M_T_PHY_WRDATA) + +#define S_T_PHY_WRLAT 16 +#define M_T_PHY_WRLAT 0x1fU +#define V_T_PHY_WRLAT(x) ((x) << S_T_PHY_WRLAT) +#define G_T_PHY_WRLAT(x) (((x) >> S_T_PHY_WRLAT) & M_T_PHY_WRLAT) + #define A_MC_UPCTL_DTUWD2 0x40218 #define A_MC_UPCTL_DTUWD3 0x4021c #define A_MC_UPCTL_DTUWDM 0x40220 #define A_MC_UPCTL_DTURD0 0x40224 #define A_MC_UPCTL_DTURD1 0x40228 +#define A_MC_LMC_DBG0 0x40228 + +#define S_T_SYS_RDLAT_DBG 16 +#define M_T_SYS_RDLAT_DBG 0x1fU +#define V_T_SYS_RDLAT_DBG(x) ((x) << S_T_SYS_RDLAT_DBG) +#define G_T_SYS_RDLAT_DBG(x) (((x) >> S_T_SYS_RDLAT_DBG) & M_T_SYS_RDLAT_DBG) + #define A_MC_UPCTL_DTURD2 0x4022c #define A_MC_UPCTL_DTURD3 0x40230 #define A_MC_UPCTL_DTULFSRWD 0x40234 @@ -39005,12 +57634,105 @@ #define V_TCTRL_DELAY(x) ((x) << S_TCTRL_DELAY) #define G_TCTRL_DELAY(x) (((x) >> S_TCTRL_DELAY) & M_TCTRL_DELAY) +#define A_MC_LMC_SMR0 0x40240 + +#define S_SMR0_RFU0 13 +#define M_SMR0_RFU0 0x7U +#define V_SMR0_RFU0(x) ((x) << S_SMR0_RFU0) +#define G_SMR0_RFU0(x) (((x) >> S_SMR0_RFU0) & M_SMR0_RFU0) + +#define S_PPD 12 +#define V_PPD(x) ((x) << S_PPD) +#define F_PPD V_PPD(1U) + +#define S_WR_RTP 9 +#define M_WR_RTP 0x7U +#define V_WR_RTP(x) ((x) << S_WR_RTP) +#define G_WR_RTP(x) (((x) >> S_WR_RTP) & M_WR_RTP) + +#define S_SMR0_DLL 8 +#define V_SMR0_DLL(x) ((x) << S_SMR0_DLL) +#define F_SMR0_DLL V_SMR0_DLL(1U) + +#define S_TM 7 +#define V_TM(x) ((x) << S_TM) +#define F_TM V_TM(1U) + +#define S_CL31 4 +#define M_CL31 0x7U +#define V_CL31(x) ((x) << S_CL31) +#define G_CL31(x) (((x) >> S_CL31) & M_CL31) + +#define S_RBT 3 +#define V_RBT(x) ((x) << S_RBT) +#define F_RBT V_RBT(1U) + +#define S_CL0 2 +#define V_CL0(x) ((x) << S_CL0) +#define F_CL0 V_CL0(1U) + +#define S_BL 0 +#define M_BL 0x3U +#define V_BL(x) ((x) << S_BL) +#define G_BL(x) (((x) >> S_BL) & M_BL) + #define A_MC_UPCTL_DFIODTCFG 0x40244 #define S_RANK3_ODT_WRITE_NSEL 26 #define V_RANK3_ODT_WRITE_NSEL(x) ((x) << S_RANK3_ODT_WRITE_NSEL) #define F_RANK3_ODT_WRITE_NSEL V_RANK3_ODT_WRITE_NSEL(1U) 
+#define A_MC_LMC_SMR1 0x40244 + +#define S_QOFF 12 +#define V_QOFF(x) ((x) << S_QOFF) +#define F_QOFF V_QOFF(1U) + +#define S_TDQS 11 +#define V_TDQS(x) ((x) << S_TDQS) +#define F_TDQS V_TDQS(1U) + +#define S_SMR1_RFU0 10 +#define V_SMR1_RFU0(x) ((x) << S_SMR1_RFU0) +#define F_SMR1_RFU0 V_SMR1_RFU0(1U) + +#define S_RTT_NOM0 9 +#define V_RTT_NOM0(x) ((x) << S_RTT_NOM0) +#define F_RTT_NOM0 V_RTT_NOM0(1U) + +#define S_SMR1_RFU1 8 +#define V_SMR1_RFU1(x) ((x) << S_SMR1_RFU1) +#define F_SMR1_RFU1 V_SMR1_RFU1(1U) + +#define S_WR_LEVEL 7 +#define V_WR_LEVEL(x) ((x) << S_WR_LEVEL) +#define F_WR_LEVEL V_WR_LEVEL(1U) + +#define S_RTT_NOM1 6 +#define V_RTT_NOM1(x) ((x) << S_RTT_NOM1) +#define F_RTT_NOM1 V_RTT_NOM1(1U) + +#define S_DIC0 5 +#define V_DIC0(x) ((x) << S_DIC0) +#define F_DIC0 V_DIC0(1U) + +#define S_AL 3 +#define M_AL 0x3U +#define V_AL(x) ((x) << S_AL) +#define G_AL(x) (((x) >> S_AL) & M_AL) + +#define S_RTT_NOM2 2 +#define V_RTT_NOM2(x) ((x) << S_RTT_NOM2) +#define F_RTT_NOM2 V_RTT_NOM2(1U) + +#define S_DIC1 1 +#define V_DIC1(x) ((x) << S_DIC1) +#define F_DIC1 V_DIC1(1U) + +#define S_SMR1_DLL 0 +#define V_SMR1_DLL(x) ((x) << S_SMR1_DLL) +#define F_SMR1_DLL V_SMR1_DLL(1U) + #define A_MC_UPCTL_DFIODTCFG1 0x40248 #define S_ODT_LEN_B8_R 24 @@ -39033,6 +57755,43 @@ #define V_ODT_LAT_W(x) ((x) << S_ODT_LAT_W) #define G_ODT_LAT_W(x) (((x) >> S_ODT_LAT_W) & M_ODT_LAT_W) +#define A_MC_LMC_SMR2 0x40248 + +#define S_WR_CRC 12 +#define V_WR_CRC(x) ((x) << S_WR_CRC) +#define F_WR_CRC V_WR_CRC(1U) + +#define S_RD_CRC 11 +#define V_RD_CRC(x) ((x) << S_RD_CRC) +#define F_RD_CRC V_RD_CRC(1U) + +#define S_RTT_WR 9 +#define M_RTT_WR 0x3U +#define V_RTT_WR(x) ((x) << S_RTT_WR) +#define G_RTT_WR(x) (((x) >> S_RTT_WR) & M_RTT_WR) + +#define S_SMR2_RFU0 8 +#define V_SMR2_RFU0(x) ((x) << S_SMR2_RFU0) +#define F_SMR2_RFU0 V_SMR2_RFU0(1U) + +#define S_SRT_ASR1 7 +#define V_SRT_ASR1(x) ((x) << S_SRT_ASR1) +#define F_SRT_ASR1 V_SRT_ASR1(1U) + +#define S_ASR0 6 +#define V_ASR0(x) ((x) << 
S_ASR0) +#define F_ASR0 V_ASR0(1U) + +#define S_CWL 3 +#define M_CWL 0x7U +#define V_CWL(x) ((x) << S_CWL) +#define G_CWL(x) (((x) >> S_CWL) & M_CWL) + +#define S_PASR 0 +#define M_PASR 0x7U +#define V_PASR(x) ((x) << S_PASR) +#define G_PASR(x) (((x) >> S_PASR) & M_PASR) + #define A_MC_UPCTL_DFIODTRANKMAP 0x4024c #define S_ODT_RANK_MAP3 12 @@ -39055,6 +57814,44 @@ #define V_ODT_RANK_MAP0(x) ((x) << S_ODT_RANK_MAP0) #define G_ODT_RANK_MAP0(x) (((x) >> S_ODT_RANK_MAP0) & M_ODT_RANK_MAP0) +#define A_MC_LMC_SMR3 0x4024c + +#define S_MPR_RD_FMT 11 +#define M_MPR_RD_FMT 0x3U +#define V_MPR_RD_FMT(x) ((x) << S_MPR_RD_FMT) +#define G_MPR_RD_FMT(x) (((x) >> S_MPR_RD_FMT) & M_MPR_RD_FMT) + +#define S_SMR3_RFU0 9 +#define M_SMR3_RFU0 0x3U +#define V_SMR3_RFU0(x) ((x) << S_SMR3_RFU0) +#define G_SMR3_RFU0(x) (((x) >> S_SMR3_RFU0) & M_SMR3_RFU0) + +#define S_FGR_MODE 6 +#define M_FGR_MODE 0x7U +#define V_FGR_MODE(x) ((x) << S_FGR_MODE) +#define G_FGR_MODE(x) (((x) >> S_FGR_MODE) & M_FGR_MODE) + +#define S_MRS_RDO 5 +#define V_MRS_RDO(x) ((x) << S_MRS_RDO) +#define F_MRS_RDO V_MRS_RDO(1U) + +#define S_DRAM_ADR 4 +#define V_DRAM_ADR(x) ((x) << S_DRAM_ADR) +#define F_DRAM_ADR V_DRAM_ADR(1U) + +#define S_GD_MODE 3 +#define V_GD_MODE(x) ((x) << S_GD_MODE) +#define F_GD_MODE V_GD_MODE(1U) + +#define S_MPR 2 +#define V_MPR(x) ((x) << S_MPR) +#define F_MPR V_MPR(1U) + +#define S_MPR_SEL 0 +#define M_MPR_SEL 0x3U +#define V_MPR_SEL(x) ((x) << S_MPR_SEL) +#define G_MPR_SEL(x) (((x) >> S_MPR_SEL) & M_MPR_SEL) + #define A_MC_UPCTL_DFITPHYWRDATA 0x40250 #define S_TPHY_WRDATA 0 @@ -39062,6 +57859,53 @@ #define V_TPHY_WRDATA(x) ((x) << S_TPHY_WRDATA) #define G_TPHY_WRDATA(x) (((x) >> S_TPHY_WRDATA) & M_TPHY_WRDATA) +#define A_MC_LMC_SMR4 0x40250 + +#define S_WR_PRE 12 +#define V_WR_PRE(x) ((x) << S_WR_PRE) +#define F_WR_PRE V_WR_PRE(1U) + +#define S_RD_PRE 11 +#define V_RD_PRE(x) ((x) << S_RD_PRE) +#define F_RD_PRE V_RD_PRE(1U) + +#define S_RPT_MODE 10 +#define V_RPT_MODE(x) ((x) << 
S_RPT_MODE) +#define F_RPT_MODE V_RPT_MODE(1U) + +#define S_FESR_MODE 9 +#define V_FESR_MODE(x) ((x) << S_FESR_MODE) +#define F_FESR_MODE V_FESR_MODE(1U) + +#define S_CS_LAT_MODE 6 +#define M_CS_LAT_MODE 0x7U +#define V_CS_LAT_MODE(x) ((x) << S_CS_LAT_MODE) +#define G_CS_LAT_MODE(x) (((x) >> S_CS_LAT_MODE) & M_CS_LAT_MODE) + +#define S_ALERT_STAT 5 +#define V_ALERT_STAT(x) ((x) << S_ALERT_STAT) +#define F_ALERT_STAT V_ALERT_STAT(1U) + +#define S_IVM_MODE 4 +#define V_IVM_MODE(x) ((x) << S_IVM_MODE) +#define F_IVM_MODE V_IVM_MODE(1U) + +#define S_TCR_MODE 3 +#define V_TCR_MODE(x) ((x) << S_TCR_MODE) +#define F_TCR_MODE V_TCR_MODE(1U) + +#define S_TCR_RANGE 2 +#define V_TCR_RANGE(x) ((x) << S_TCR_RANGE) +#define F_TCR_RANGE V_TCR_RANGE(1U) + +#define S_MPD_MODE 1 +#define V_MPD_MODE(x) ((x) << S_MPD_MODE) +#define F_MPD_MODE V_MPD_MODE(1U) + +#define S_SMR4_RFU 0 +#define V_SMR4_RFU(x) ((x) << S_SMR4_RFU) +#define F_SMR4_RFU V_SMR4_RFU(1U) + #define A_MC_UPCTL_DFITPHYWRLAT 0x40254 #define S_TPHY_WRLAT 0 @@ -39069,6 +57913,63 @@ #define V_TPHY_WRLAT(x) ((x) << S_TPHY_WRLAT) #define G_TPHY_WRLAT(x) (((x) >> S_TPHY_WRLAT) & M_TPHY_WRLAT) +#define A_MC_LMC_SMR5 0x40254 + +#define S_RD_DBI 11 +#define V_RD_DBI(x) ((x) << S_RD_DBI) +#define F_RD_DBI V_RD_DBI(1U) + +#define S_WR_DBI 10 +#define V_WR_DBI(x) ((x) << S_WR_DBI) +#define F_WR_DBI V_WR_DBI(1U) + +#define S_DM_MODE 9 +#define V_DM_MODE(x) ((x) << S_DM_MODE) +#define F_DM_MODE V_DM_MODE(1U) + +#define S_RTT_PARK 6 +#define M_RTT_PARK 0x7U +#define V_RTT_PARK(x) ((x) << S_RTT_PARK) +#define G_RTT_PARK(x) (((x) >> S_RTT_PARK) & M_RTT_PARK) + +#define S_SMR5_RFU 5 +#define V_SMR5_RFU(x) ((x) << S_SMR5_RFU) +#define F_SMR5_RFU V_SMR5_RFU(1U) + +#define S_PAR_ERR_STAT 4 +#define V_PAR_ERR_STAT(x) ((x) << S_PAR_ERR_STAT) +#define F_PAR_ERR_STAT V_PAR_ERR_STAT(1U) + +#define S_CRC_CLEAR 3 +#define V_CRC_CLEAR(x) ((x) << S_CRC_CLEAR) +#define F_CRC_CLEAR V_CRC_CLEAR(1U) + +#define S_PAR_LAT_MODE 0 +#define M_PAR_LAT_MODE 
0x7U +#define V_PAR_LAT_MODE(x) ((x) << S_PAR_LAT_MODE) +#define G_PAR_LAT_MODE(x) (((x) >> S_PAR_LAT_MODE) & M_PAR_LAT_MODE) + +#define A_MC_LMC_SMR6 0x40258 + +#define S_TCCD_L 10 +#define M_TCCD_L 0x7U +#define V_TCCD_L(x) ((x) << S_TCCD_L) +#define G_TCCD_L(x) (((x) >> S_TCCD_L) & M_TCCD_L) + +#define S_SRM6_RFU 7 +#define M_SRM6_RFU 0x7U +#define V_SRM6_RFU(x) ((x) << S_SRM6_RFU) +#define G_SRM6_RFU(x) (((x) >> S_SRM6_RFU) & M_SRM6_RFU) + +#define S_VREF_DQ_RANGE 6 +#define V_VREF_DQ_RANGE(x) ((x) << S_VREF_DQ_RANGE) +#define F_VREF_DQ_RANGE V_VREF_DQ_RANGE(1U) + +#define S_VREF_DQ_VALUE 0 +#define M_VREF_DQ_VALUE 0x3fU +#define V_VREF_DQ_VALUE(x) ((x) << S_VREF_DQ_VALUE) +#define G_VREF_DQ_VALUE(x) (((x) >> S_VREF_DQ_VALUE) & M_VREF_DQ_VALUE) + #define A_MC_UPCTL_DFITRDDATAEN 0x40260 #define S_TRDDATA_EN 0 @@ -39118,6 +58019,16 @@ #define V_TCTRLUPD_MIN(x) ((x) << S_TCTRLUPD_MIN) #define G_TCTRLUPD_MIN(x) (((x) >> S_TCTRLUPD_MIN) & M_TCTRLUPD_MIN) +#define A_MC_LMC_ODTR0 0x40280 + +#define S_RK0W 25 +#define V_RK0W(x) ((x) << S_RK0W) +#define F_RK0W V_RK0W(1U) + +#define S_RK0R 24 +#define V_RK0R(x) ((x) << S_RK0R) +#define F_RK0R V_RK0R(1U) + #define A_MC_UPCTL_DFITCTRLUPDMAX 0x40284 #define S_TCTRLUPD_MAX 0 @@ -39315,6 +58226,17 @@ #define A_MC_UPCTL_DFITRWRLVLRESP0 0x40300 #define A_MC_UPCTL_DFITRWRLVLRESP1 0x40304 +#define A_MC_LMC_CALSTAT 0x40304 + +#define S_PHYUPD_ERR 28 +#define M_PHYUPD_ERR 0xfU +#define V_PHYUPD_ERR(x) ((x) << S_PHYUPD_ERR) +#define G_PHYUPD_ERR(x) (((x) >> S_PHYUPD_ERR) & M_PHYUPD_ERR) + +#define S_PHYUPD_BUSY 27 +#define V_PHYUPD_BUSY(x) ((x) << S_PHYUPD_BUSY) +#define F_PHYUPD_BUSY V_PHYUPD_BUSY(1U) + #define A_MC_UPCTL_DFITRWRLVLRESP2 0x40308 #define S_DFI_WRLVL_RESP2 0 @@ -39350,7 +58272,9 @@ #define G_DFI_RDLVL_DELAY2(x) (((x) >> S_DFI_RDLVL_DELAY2) & M_DFI_RDLVL_DELAY2) #define A_MC_UPCTL_DFITRRDLVLGATEDELAY0 0x40330 +#define A_MC_LMC_T_PHYUPD0 0x40330 #define A_MC_UPCTL_DFITRRDLVLGATEDELAY1 0x40334 +#define 
A_MC_LMC_T_PHYUPD1 0x40334 #define A_MC_UPCTL_DFITRRDLVLGATEDELAY2 0x40338 #define S_DFI_RDLVL_GATE_DELAY2 0 @@ -39358,6 +58282,7 @@ #define V_DFI_RDLVL_GATE_DELAY2(x) ((x) << S_DFI_RDLVL_GATE_DELAY2) #define G_DFI_RDLVL_GATE_DELAY2(x) (((x) >> S_DFI_RDLVL_GATE_DELAY2) & M_DFI_RDLVL_GATE_DELAY2) +#define A_MC_LMC_T_PHYUPD2 0x40338 #define A_MC_UPCTL_DFITRCMD 0x4033c #define S_DFITRCMD_START 31 @@ -39374,6 +58299,7 @@ #define V_DFITRCMD_OPCODE(x) ((x) << S_DFITRCMD_OPCODE) #define G_DFITRCMD_OPCODE(x) (((x) >> S_DFITRCMD_OPCODE) & M_DFITRCMD_OPCODE) +#define A_MC_LMC_T_PHYUPD3 0x4033c #define A_MC_UPCTL_IPVR 0x403f8 #define A_MC_UPCTL_IPTR 0x403fc #define A_MC_P_DDRPHY_RST_CTRL 0x41300 @@ -39399,7 +58325,47 @@ #define V_CTL_RST_N(x) ((x) << S_CTL_RST_N) #define F_CTL_RST_N V_CTL_RST_N(1U) +#define S_PHY_CAL_REQ 21 +#define V_PHY_CAL_REQ(x) ((x) << S_PHY_CAL_REQ) +#define F_PHY_CAL_REQ V_PHY_CAL_REQ(1U) + +#define S_T6_PHY_DRAM_WL 17 +#define M_T6_PHY_DRAM_WL 0xfU +#define V_T6_PHY_DRAM_WL(x) ((x) << S_T6_PHY_DRAM_WL) +#define G_T6_PHY_DRAM_WL(x) (((x) >> S_T6_PHY_DRAM_WL) & M_T6_PHY_DRAM_WL) + #define A_MC_P_PERFORMANCE_CTRL 0x41304 + +#define S_BUF_USE_TH 12 +#define M_BUF_USE_TH 0x7U +#define V_BUF_USE_TH(x) ((x) << S_BUF_USE_TH) +#define G_BUF_USE_TH(x) (((x) >> S_BUF_USE_TH) & M_BUF_USE_TH) + +#define S_MC_IDLE_TH 8 +#define M_MC_IDLE_TH 0xfU +#define V_MC_IDLE_TH(x) ((x) << S_MC_IDLE_TH) +#define G_MC_IDLE_TH(x) (((x) >> S_MC_IDLE_TH) & M_MC_IDLE_TH) + +#define S_RMW_DEFER_EN 7 +#define V_RMW_DEFER_EN(x) ((x) << S_RMW_DEFER_EN) +#define F_RMW_DEFER_EN V_RMW_DEFER_EN(1U) + +#define S_DDR3_BRBC_MODE 6 +#define V_DDR3_BRBC_MODE(x) ((x) << S_DDR3_BRBC_MODE) +#define F_DDR3_BRBC_MODE V_DDR3_BRBC_MODE(1U) + +#define S_RMW_DWRITE_EN 5 +#define V_RMW_DWRITE_EN(x) ((x) << S_RMW_DWRITE_EN) +#define F_RMW_DWRITE_EN V_RMW_DWRITE_EN(1U) + +#define S_RMW_MERGE_EN 4 +#define V_RMW_MERGE_EN(x) ((x) << S_RMW_MERGE_EN) +#define F_RMW_MERGE_EN V_RMW_MERGE_EN(1U) + +#define 
S_SYNC_PAB_EN 3 +#define V_SYNC_PAB_EN(x) ((x) << S_SYNC_PAB_EN) +#define F_SYNC_PAB_EN V_SYNC_PAB_EN(1U) + #define A_MC_P_ECC_CTRL 0x41308 #define A_MC_P_PAR_ENABLE 0x4130c #define A_MC_P_PAR_CAUSE 0x41310 @@ -39435,6 +58401,18 @@ #define V_STATIC_LAT(x) ((x) << S_STATIC_LAT) #define F_STATIC_LAT V_STATIC_LAT(1U) +#define S_STATIC_PP64 26 +#define V_STATIC_PP64(x) ((x) << S_STATIC_PP64) +#define F_STATIC_PP64 V_STATIC_PP64(1U) + +#define S_STATIC_PPEN 25 +#define V_STATIC_PPEN(x) ((x) << S_STATIC_PPEN) +#define F_STATIC_PPEN V_STATIC_PPEN(1U) + +#define S_STATIC_OOOEN 24 +#define V_STATIC_OOOEN(x) ((x) << S_STATIC_OOOEN) +#define F_STATIC_OOOEN V_STATIC_OOOEN(1U) + #define A_MC_P_CORE_PCTL_STAT 0x41328 #define A_MC_P_DEBUG_CNT 0x4132c #define A_MC_CE_ERR_DATA_RDATA 0x41330 @@ -39456,6 +58434,38 @@ #define A_MC_P_FPGA_BONUS 0x413bc #define A_MC_P_DEBUG_CFG 0x413c0 #define A_MC_P_DEBUG_RPT 0x413c4 +#define A_MC_P_PHY_ADR_CK_EN 0x413c8 + +#define S_ADR_CK_EN 0 +#define V_ADR_CK_EN(x) ((x) << S_ADR_CK_EN) +#define F_ADR_CK_EN V_ADR_CK_EN(1U) + +#define A_MC_CE_ERR_ECC_DATA0 0x413d0 +#define A_MC_CE_ERR_ECC_DATA1 0x413d4 +#define A_MC_UE_ERR_ECC_DATA0 0x413d8 +#define A_MC_UE_ERR_ECC_DATA1 0x413dc +#define A_MC_P_RMW_PRIO 0x413f0 + +#define S_WR_HI_TH 24 +#define M_WR_HI_TH 0xffU +#define V_WR_HI_TH(x) ((x) << S_WR_HI_TH) +#define G_WR_HI_TH(x) (((x) >> S_WR_HI_TH) & M_WR_HI_TH) + +#define S_WR_MID_TH 16 +#define M_WR_MID_TH 0xffU +#define V_WR_MID_TH(x) ((x) << S_WR_MID_TH) +#define G_WR_MID_TH(x) (((x) >> S_WR_MID_TH) & M_WR_MID_TH) + +#define S_RD_HI_TH 8 +#define M_RD_HI_TH 0xffU +#define V_RD_HI_TH(x) ((x) << S_RD_HI_TH) +#define G_RD_HI_TH(x) (((x) >> S_RD_HI_TH) & M_RD_HI_TH) + +#define S_RD_MID_TH 0 +#define M_RD_MID_TH 0xffU +#define V_RD_MID_TH(x) ((x) << S_RD_MID_TH) +#define G_RD_MID_TH(x) (((x) >> S_RD_MID_TH) & M_RD_MID_TH) + #define A_MC_P_BIST_CMD 0x41400 #define S_BURST_LEN 16 @@ -39467,7 +58477,9 @@ #define A_MC_P_BIST_CMD_LEN 0x41408 #define 
A_MC_P_BIST_DATA_PATTERN 0x4140c #define A_MC_P_BIST_USER_WDATA0 0x41414 +#define A_MC_P_BIST_USER_WMASK0 0x41414 #define A_MC_P_BIST_USER_WDATA1 0x41418 +#define A_MC_P_BIST_USER_WMASK1 0x41418 #define A_MC_P_BIST_USER_WDATA2 0x4141c #define S_USER_DATA_MASK 8 @@ -39475,6 +58487,21 @@ #define V_USER_DATA_MASK(x) ((x) << S_USER_DATA_MASK) #define G_USER_DATA_MASK(x) (((x) >> S_USER_DATA_MASK) & M_USER_DATA_MASK) +#define A_MC_P_BIST_USER_WMASK2 0x4141c + +#define S_MASK_128_1 9 +#define V_MASK_128_1(x) ((x) << S_MASK_128_1) +#define F_MASK_128_1 V_MASK_128_1(1U) + +#define S_MASK_128_0 8 +#define V_MASK_128_0(x) ((x) << S_MASK_128_0) +#define F_MASK_128_0 V_MASK_128_0(1U) + +#define S_USER_MASK_ECC 0 +#define M_USER_MASK_ECC 0xffU +#define V_USER_MASK_ECC(x) ((x) << S_USER_MASK_ECC) +#define G_USER_MASK_ECC(x) (((x) >> S_USER_MASK_ECC) & M_USER_MASK_ECC) + #define A_MC_P_BIST_NUM_ERR 0x41480 #define A_MC_P_BIST_ERR_FIRST_ADDR 0x41484 #define A_MC_P_BIST_STATUS_RDATA 0x41488 @@ -39521,6 +58548,14 @@ #define V_MRS_CMD_DATA_N3(x) ((x) << S_MRS_CMD_DATA_N3) #define F_MRS_CMD_DATA_N3 V_MRS_CMD_DATA_N3(1U) +#define S_DP18_WRAPSEL 5 +#define V_DP18_WRAPSEL(x) ((x) << S_DP18_WRAPSEL) +#define F_DP18_WRAPSEL V_DP18_WRAPSEL(1U) + +#define S_HW_VALUE 4 +#define V_HW_VALUE(x) ((x) << S_HW_VALUE) +#define F_HW_VALUE V_HW_VALUE(1U) + #define A_MC_DDRPHY_DP18_DATA_BIT_DIR0 0x44008 #define S_DATA_BIT_DIR_0_15 0 @@ -39774,6 +58809,16 @@ #define V_WR_DEBUG_SEL(x) ((x) << S_WR_DEBUG_SEL) #define G_WR_DEBUG_SEL(x) (((x) >> S_WR_DEBUG_SEL) & M_WR_DEBUG_SEL) +#define S_DP18_HS_PROBE_A_SEL 11 +#define M_DP18_HS_PROBE_A_SEL 0x1fU +#define V_DP18_HS_PROBE_A_SEL(x) ((x) << S_DP18_HS_PROBE_A_SEL) +#define G_DP18_HS_PROBE_A_SEL(x) (((x) >> S_DP18_HS_PROBE_A_SEL) & M_DP18_HS_PROBE_A_SEL) + +#define S_DP18_HS_PROBE_B_SEL 6 +#define M_DP18_HS_PROBE_B_SEL 0x1fU +#define V_DP18_HS_PROBE_B_SEL(x) ((x) << S_DP18_HS_PROBE_B_SEL) +#define G_DP18_HS_PROBE_B_SEL(x) (((x) >> S_DP18_HS_PROBE_B_SEL) & 
M_DP18_HS_PROBE_B_SEL) + #define A_MC_DDRPHY_DP18_READ_DELAY_OFFSET0_RANK_PAIR 0x44030 #define S_OFFSET_BITS1_7 8 @@ -39837,6 +58882,10 @@ #define V_PER_RDCLK_UPDATE_DIS(x) ((x) << S_PER_RDCLK_UPDATE_DIS) #define F_PER_RDCLK_UPDATE_DIS V_PER_RDCLK_UPDATE_DIS(1U) +#define S_DQS_ALIGN_BY_QUAD 4 +#define V_DQS_ALIGN_BY_QUAD(x) ((x) << S_DQS_ALIGN_BY_QUAD) +#define F_DQS_ALIGN_BY_QUAD V_DQS_ALIGN_BY_QUAD(1U) + #define A_MC_DDRPHY_DP18_DQS_GATE_DELAY_RP 0x4404c #define S_DQS_GATE_DELAY_N0 12 @@ -39991,6 +59040,38 @@ #define V_MIN_EYE_MASK(x) ((x) << S_MIN_EYE_MASK) #define F_MIN_EYE_MASK V_MIN_EYE_MASK(1U) +#define A_MC_DDRPHY_DP18_WRCLK_CNTL 0x44058 + +#define S_PRBS_WAIT 14 +#define M_PRBS_WAIT 0x3U +#define V_PRBS_WAIT(x) ((x) << S_PRBS_WAIT) +#define G_PRBS_WAIT(x) (((x) >> S_PRBS_WAIT) & M_PRBS_WAIT) + +#define S_PRBS_SYNC_EARLY 13 +#define V_PRBS_SYNC_EARLY(x) ((x) << S_PRBS_SYNC_EARLY) +#define F_PRBS_SYNC_EARLY V_PRBS_SYNC_EARLY(1U) + +#define S_RD_DELAY_EARLY 12 +#define V_RD_DELAY_EARLY(x) ((x) << S_RD_DELAY_EARLY) +#define F_RD_DELAY_EARLY V_RD_DELAY_EARLY(1U) + +#define S_SS_QUAD_CAL 10 +#define V_SS_QUAD_CAL(x) ((x) << S_SS_QUAD_CAL) +#define F_SS_QUAD_CAL V_SS_QUAD_CAL(1U) + +#define S_SS_QUAD 8 +#define M_SS_QUAD 0x3U +#define V_SS_QUAD(x) ((x) << S_SS_QUAD) +#define G_SS_QUAD(x) (((x) >> S_SS_QUAD) & M_SS_QUAD) + +#define S_SS_RD_DELAY 7 +#define V_SS_RD_DELAY(x) ((x) << S_SS_RD_DELAY) +#define F_SS_RD_DELAY V_SS_RD_DELAY(1U) + +#define S_FORCE_HI_Z 6 +#define V_FORCE_HI_Z(x) ((x) << S_FORCE_HI_Z) +#define F_FORCE_HI_Z V_FORCE_HI_Z(1U) + #define A_MC_DDRPHY_DP18_WR_LVL_STATUS0 0x4405c #define S_CLK_LEVEL 14 @@ -40026,6 +59107,10 @@ #define V_ZERO_DETECTED(x) ((x) << S_ZERO_DETECTED) #define F_ZERO_DETECTED V_ZERO_DETECTED(1U) +#define S_WR_LVL_DONE 12 +#define V_WR_LVL_DONE(x) ((x) << S_WR_LVL_DONE) +#define F_WR_LVL_DONE V_WR_LVL_DONE(1U) + #define A_MC_DDRPHY_DP18_WR_CNTR_STATUS0 0x44060 #define S_BIT_CENTERED 11 @@ -40057,6 +59142,10 @@ #define 
V_DDONE(x) ((x) << S_DDONE) #define F_DDONE V_DDONE(1U) +#define S_WR_CNTR_DONE 5 +#define V_WR_CNTR_DONE(x) ((x) << S_WR_CNTR_DONE) +#define F_WR_CNTR_DONE V_WR_CNTR_DONE(1U) + #define A_MC_DDRPHY_DP18_WR_CNTR_STATUS1 0x44064 #define S_FW_LEFT_SIDE 5 @@ -40163,6 +59252,18 @@ #define V_OFFSET_ERR_MASK(x) ((x) << S_OFFSET_ERR_MASK) #define F_OFFSET_ERR_MASK V_OFFSET_ERR_MASK(1U) +#define S_DQS_REC_LOW_POWER 11 +#define V_DQS_REC_LOW_POWER(x) ((x) << S_DQS_REC_LOW_POWER) +#define F_DQS_REC_LOW_POWER V_DQS_REC_LOW_POWER(1U) + +#define S_DQ_REC_LOW_POWER 10 +#define V_DQ_REC_LOW_POWER(x) ((x) << S_DQ_REC_LOW_POWER) +#define F_DQ_REC_LOW_POWER V_DQ_REC_LOW_POWER(1U) + +#define S_ADVANCE_PR_VALUE 0 +#define V_ADVANCE_PR_VALUE(x) ((x) << S_ADVANCE_PR_VALUE) +#define F_ADVANCE_PR_VALUE V_ADVANCE_PR_VALUE(1U) + #define A_MC_DDRPHY_DP18_DFT_WRAP_STATUS 0x44074 #define S_CHECKER_RESET 14 @@ -40179,7 +59280,38 @@ #define V_ERROR(x) ((x) << S_ERROR) #define G_ERROR(x) (((x) >> S_ERROR) & M_ERROR) +#define S_CHECKER_ENABLE 15 +#define V_CHECKER_ENABLE(x) ((x) << S_CHECKER_ENABLE) +#define F_CHECKER_ENABLE V_CHECKER_ENABLE(1U) + +#define S_DP18_DFT_ERROR 0 +#define M_DP18_DFT_ERROR 0x3fU +#define V_DP18_DFT_ERROR(x) ((x) << S_DP18_DFT_ERROR) +#define G_DP18_DFT_ERROR(x) (((x) >> S_DP18_DFT_ERROR) & M_DP18_DFT_ERROR) + #define A_MC_DDRPHY_DP18_RD_DIA_CONFIG0 0x44078 + +#define S_SYSCLK_RDCLK_OFFSET 8 +#define M_SYSCLK_RDCLK_OFFSET 0x7fU +#define V_SYSCLK_RDCLK_OFFSET(x) ((x) << S_SYSCLK_RDCLK_OFFSET) +#define G_SYSCLK_RDCLK_OFFSET(x) (((x) >> S_SYSCLK_RDCLK_OFFSET) & M_SYSCLK_RDCLK_OFFSET) + +#define S_SYSCLK_DQSCLK_OFFSET 0 +#define M_SYSCLK_DQSCLK_OFFSET 0x7fU +#define V_SYSCLK_DQSCLK_OFFSET(x) ((x) << S_SYSCLK_DQSCLK_OFFSET) +#define G_SYSCLK_DQSCLK_OFFSET(x) (((x) >> S_SYSCLK_DQSCLK_OFFSET) & M_SYSCLK_DQSCLK_OFFSET) + +#define S_T6_SYSCLK_DQSCLK_OFFSET 8 +#define M_T6_SYSCLK_DQSCLK_OFFSET 0x7fU +#define V_T6_SYSCLK_DQSCLK_OFFSET(x) ((x) << S_T6_SYSCLK_DQSCLK_OFFSET) +#define 
G_T6_SYSCLK_DQSCLK_OFFSET(x) (((x) >> S_T6_SYSCLK_DQSCLK_OFFSET) & M_T6_SYSCLK_DQSCLK_OFFSET) + +#define S_T6_SYSCLK_RDCLK_OFFSET 0 +#define M_T6_SYSCLK_RDCLK_OFFSET 0x7fU +#define V_T6_SYSCLK_RDCLK_OFFSET(x) ((x) << S_T6_SYSCLK_RDCLK_OFFSET) +#define G_T6_SYSCLK_RDCLK_OFFSET(x) (((x) >> S_T6_SYSCLK_RDCLK_OFFSET) & M_T6_SYSCLK_RDCLK_OFFSET) + +#define A_MC_DDRPHY_DP18_WRCLK_AUX_CNTL 0x4407c #define A_MC_DDRPHY_DP18_DQSCLK_PR0_RANK_PAIR 0x440c0 #define S_DQSCLK_ROT_CLK_N0_N2 8 @@ -40320,7 +59452,64 @@ #define G_MEMINTD23_POS(x) (((x) >> S_MEMINTD23_POS) & M_MEMINTD23_POS) #define A_MC_DDRPHY_DP18_RD_DIA_CONFIG1 0x440d4 + +#define S_DQS_ALIGN_SM 11 +#define M_DQS_ALIGN_SM 0x1fU +#define V_DQS_ALIGN_SM(x) ((x) << S_DQS_ALIGN_SM) +#define G_DQS_ALIGN_SM(x) (((x) >> S_DQS_ALIGN_SM) & M_DQS_ALIGN_SM) + +#define S_DQS_ALIGN_CNTR 7 +#define M_DQS_ALIGN_CNTR 0xfU +#define V_DQS_ALIGN_CNTR(x) ((x) << S_DQS_ALIGN_CNTR) +#define G_DQS_ALIGN_CNTR(x) (((x) >> S_DQS_ALIGN_CNTR) & M_DQS_ALIGN_CNTR) + +#define S_ITERATION_CNTR 6 +#define V_ITERATION_CNTR(x) ((x) << S_ITERATION_CNTR) +#define F_ITERATION_CNTR V_ITERATION_CNTR(1U) + +#define S_DQS_ALIGN_ITER_CNTR 0 +#define M_DQS_ALIGN_ITER_CNTR 0x3fU +#define V_DQS_ALIGN_ITER_CNTR(x) ((x) << S_DQS_ALIGN_ITER_CNTR) +#define G_DQS_ALIGN_ITER_CNTR(x) (((x) >> S_DQS_ALIGN_ITER_CNTR) & M_DQS_ALIGN_ITER_CNTR) + #define A_MC_DDRPHY_DP18_RD_DIA_CONFIG2 0x440d8 + +#define S_CALIBRATE_BIT 13 +#define M_CALIBRATE_BIT 0x7U +#define V_CALIBRATE_BIT(x) ((x) << S_CALIBRATE_BIT) +#define G_CALIBRATE_BIT(x) (((x) >> S_CALIBRATE_BIT) & M_CALIBRATE_BIT) + +#define S_DQS_ALIGN_QUAD 11 +#define M_DQS_ALIGN_QUAD 0x3U +#define V_DQS_ALIGN_QUAD(x) ((x) << S_DQS_ALIGN_QUAD) +#define G_DQS_ALIGN_QUAD(x) (((x) >> S_DQS_ALIGN_QUAD) & M_DQS_ALIGN_QUAD) + +#define S_DQS_QUAD_CONFIG 8 +#define M_DQS_QUAD_CONFIG 0x7U +#define V_DQS_QUAD_CONFIG(x) ((x) << S_DQS_QUAD_CONFIG) +#define G_DQS_QUAD_CONFIG(x) (((x) >> S_DQS_QUAD_CONFIG) & M_DQS_QUAD_CONFIG) + +#define 
S_OPERATE_MODE 4 +#define M_OPERATE_MODE 0xfU +#define V_OPERATE_MODE(x) ((x) << S_OPERATE_MODE) +#define G_OPERATE_MODE(x) (((x) >> S_OPERATE_MODE) & M_OPERATE_MODE) + +#define S_EN_DQS_OFFSET 3 +#define V_EN_DQS_OFFSET(x) ((x) << S_EN_DQS_OFFSET) +#define F_EN_DQS_OFFSET V_EN_DQS_OFFSET(1U) + +#define S_DQS_ALIGN_JITTER 2 +#define V_DQS_ALIGN_JITTER(x) ((x) << S_DQS_ALIGN_JITTER) +#define F_DQS_ALIGN_JITTER V_DQS_ALIGN_JITTER(1U) + +#define S_DIS_CLK_GATE 1 +#define V_DIS_CLK_GATE(x) ((x) << S_DIS_CLK_GATE) +#define F_DIS_CLK_GATE V_DIS_CLK_GATE(1U) + +#define S_MAX_DQS_ITER 0 +#define V_MAX_DQS_ITER(x) ((x) << S_MAX_DQS_ITER) +#define F_MAX_DQS_ITER V_MAX_DQS_ITER(1U) + #define A_MC_DDRPHY_DP18_DQSCLK_OFFSET 0x440dc #define S_DQS_OFFSET 8 @@ -40394,6 +59583,80 @@ #define G_INITIAL_DQS_ROT_N1_N3(x) (((x) >> S_INITIAL_DQS_ROT_N1_N3) & M_INITIAL_DQS_ROT_N1_N3) #define A_MC_DDRPHY_DP18_INITIAL_DQS_ALIGN1_RANK_PAIR 0x44174 +#define A_MC_DDRPHY_DP18_WRCLK_STATUS 0x44178 + +#define S_WRCLK_CALIB_DONE 15 +#define V_WRCLK_CALIB_DONE(x) ((x) << S_WRCLK_CALIB_DONE) +#define F_WRCLK_CALIB_DONE V_WRCLK_CALIB_DONE(1U) + +#define S_VALUE_UPDATED 14 +#define V_VALUE_UPDATED(x) ((x) << S_VALUE_UPDATED) +#define F_VALUE_UPDATED V_VALUE_UPDATED(1U) + +#define S_FAIL_PASS_V 13 +#define V_FAIL_PASS_V(x) ((x) << S_FAIL_PASS_V) +#define F_FAIL_PASS_V V_FAIL_PASS_V(1U) + +#define S_PASS_FAIL_V 12 +#define V_PASS_FAIL_V(x) ((x) << S_PASS_FAIL_V) +#define F_PASS_FAIL_V V_PASS_FAIL_V(1U) + +#define S_FP_PF_EDGE_NF 11 +#define V_FP_PF_EDGE_NF(x) ((x) << S_FP_PF_EDGE_NF) +#define F_FP_PF_EDGE_NF V_FP_PF_EDGE_NF(1U) + +#define S_NON_SYMETRIC 10 +#define V_NON_SYMETRIC(x) ((x) << S_NON_SYMETRIC) +#define F_NON_SYMETRIC V_NON_SYMETRIC(1U) + +#define S_FULL_RANGE 8 +#define V_FULL_RANGE(x) ((x) << S_FULL_RANGE) +#define F_FULL_RANGE V_FULL_RANGE(1U) + +#define S_QUAD3_EDGES 7 +#define V_QUAD3_EDGES(x) ((x) << S_QUAD3_EDGES) +#define F_QUAD3_EDGES V_QUAD3_EDGES(1U) + +#define S_QUAD2_EDGES 6 
+#define V_QUAD2_EDGES(x) ((x) << S_QUAD2_EDGES) +#define F_QUAD2_EDGES V_QUAD2_EDGES(1U) + +#define S_QUAD1_EDGES 5 +#define V_QUAD1_EDGES(x) ((x) << S_QUAD1_EDGES) +#define F_QUAD1_EDGES V_QUAD1_EDGES(1U) + +#define S_QUAD0_EDGES 4 +#define V_QUAD0_EDGES(x) ((x) << S_QUAD0_EDGES) +#define F_QUAD0_EDGES V_QUAD0_EDGES(1U) + +#define S_QUAD3_CAVEAT 3 +#define V_QUAD3_CAVEAT(x) ((x) << S_QUAD3_CAVEAT) +#define F_QUAD3_CAVEAT V_QUAD3_CAVEAT(1U) + +#define S_QUAD2_CAVEAT 2 +#define V_QUAD2_CAVEAT(x) ((x) << S_QUAD2_CAVEAT) +#define F_QUAD2_CAVEAT V_QUAD2_CAVEAT(1U) + +#define S_QUAD1_CAVEAT 1 +#define V_QUAD1_CAVEAT(x) ((x) << S_QUAD1_CAVEAT) +#define F_QUAD1_CAVEAT V_QUAD1_CAVEAT(1U) + +#define S_QUAD0_CAVEAT 0 +#define V_QUAD0_CAVEAT(x) ((x) << S_QUAD0_CAVEAT) +#define F_QUAD0_CAVEAT V_QUAD0_CAVEAT(1U) + +#define A_MC_DDRPHY_DP18_WRCLK_EDGE 0x4417c + +#define S_FAIL_PASS_VALUE 8 +#define M_FAIL_PASS_VALUE 0x7fU +#define V_FAIL_PASS_VALUE(x) ((x) << S_FAIL_PASS_VALUE) +#define G_FAIL_PASS_VALUE(x) (((x) >> S_FAIL_PASS_VALUE) & M_FAIL_PASS_VALUE) + +#define S_PASS_FAIL_VALUE 0 +#define M_PASS_FAIL_VALUE 0xffU +#define V_PASS_FAIL_VALUE(x) ((x) << S_PASS_FAIL_VALUE) +#define G_PASS_FAIL_VALUE(x) (((x) >> S_PASS_FAIL_VALUE) & M_PASS_FAIL_VALUE) + #define A_MC_DDRPHY_DP18_READ_EYE_SIZE0_RANK_PAIR 0x44180 #define S_RD_EYE_SIZE_BITS2_7 8 @@ -40418,7 +59681,45 @@ #define A_MC_DDRPHY_DP18_READ_EYE_SIZE10_RANK_PAIR 0x441a8 #define A_MC_DDRPHY_DP18_READ_EYE_SIZE11_RANK_PAIR 0x441ac #define A_MC_DDRPHY_DP18_RD_DIA_CONFIG3 0x441b4 + +#define S_DESIRED_EDGE_CNTR_TARGET_HIGH 8 +#define M_DESIRED_EDGE_CNTR_TARGET_HIGH 0xffU +#define V_DESIRED_EDGE_CNTR_TARGET_HIGH(x) ((x) << S_DESIRED_EDGE_CNTR_TARGET_HIGH) +#define G_DESIRED_EDGE_CNTR_TARGET_HIGH(x) (((x) >> S_DESIRED_EDGE_CNTR_TARGET_HIGH) & M_DESIRED_EDGE_CNTR_TARGET_HIGH) + +#define S_DESIRED_EDGE_CNTR_TARGET_LOW 0 +#define M_DESIRED_EDGE_CNTR_TARGET_LOW 0xffU +#define V_DESIRED_EDGE_CNTR_TARGET_LOW(x) ((x) << 
S_DESIRED_EDGE_CNTR_TARGET_LOW) +#define G_DESIRED_EDGE_CNTR_TARGET_LOW(x) (((x) >> S_DESIRED_EDGE_CNTR_TARGET_LOW) & M_DESIRED_EDGE_CNTR_TARGET_LOW) + #define A_MC_DDRPHY_DP18_RD_DIA_CONFIG4 0x441b8 + +#define S_APPROACH_ALIGNMENT 15 +#define V_APPROACH_ALIGNMENT(x) ((x) << S_APPROACH_ALIGNMENT) +#define F_APPROACH_ALIGNMENT V_APPROACH_ALIGNMENT(1U) + +#define A_MC_DDRPHY_DP18_DELAY_LINE_PWR_CTL 0x441bc + +#define S_QUAD0_PWR_CTL 12 +#define M_QUAD0_PWR_CTL 0xfU +#define V_QUAD0_PWR_CTL(x) ((x) << S_QUAD0_PWR_CTL) +#define G_QUAD0_PWR_CTL(x) (((x) >> S_QUAD0_PWR_CTL) & M_QUAD0_PWR_CTL) + +#define S_QUAD1_PWR_CTL 8 +#define M_QUAD1_PWR_CTL 0xfU +#define V_QUAD1_PWR_CTL(x) ((x) << S_QUAD1_PWR_CTL) +#define G_QUAD1_PWR_CTL(x) (((x) >> S_QUAD1_PWR_CTL) & M_QUAD1_PWR_CTL) + +#define S_QUAD2_PWR_CTL 4 +#define M_QUAD2_PWR_CTL 0xfU +#define V_QUAD2_PWR_CTL(x) ((x) << S_QUAD2_PWR_CTL) +#define G_QUAD2_PWR_CTL(x) (((x) >> S_QUAD2_PWR_CTL) & M_QUAD2_PWR_CTL) + +#define S_QUAD3_PWR_CTL 0 +#define M_QUAD3_PWR_CTL 0xfU +#define V_QUAD3_PWR_CTL(x) ((x) << S_QUAD3_PWR_CTL) +#define G_QUAD3_PWR_CTL(x) (((x) >> S_QUAD3_PWR_CTL) & M_QUAD3_PWR_CTL) + #define A_MC_DDRPHY_DP18_READ_TIMING_REFERENCE0 0x441c0 #define S_REFERENCE_BITS1_7 8 @@ -40550,6 +59851,28 @@ #define G_DQ_WR_OFFSET_N3(x) (((x) >> S_DQ_WR_OFFSET_N3) & M_DQ_WR_OFFSET_N3) #define A_MC_DDRPHY_DP18_POWERDOWN_1 0x441fc + +#define S_EYEDAC_PD 13 +#define V_EYEDAC_PD(x) ((x) << S_EYEDAC_PD) +#define F_EYEDAC_PD V_EYEDAC_PD(1U) + +#define S_ANALOG_OUTPUT_STAB 9 +#define V_ANALOG_OUTPUT_STAB(x) ((x) << S_ANALOG_OUTPUT_STAB) +#define F_ANALOG_OUTPUT_STAB V_ANALOG_OUTPUT_STAB(1U) + +#define S_DP18_RX_PD 2 +#define M_DP18_RX_PD 0x3U +#define V_DP18_RX_PD(x) ((x) << S_DP18_RX_PD) +#define G_DP18_RX_PD(x) (((x) >> S_DP18_RX_PD) & M_DP18_RX_PD) + +#define S_DELAY_LINE_CTL_OVERRIDE 4 +#define V_DELAY_LINE_CTL_OVERRIDE(x) ((x) << S_DELAY_LINE_CTL_OVERRIDE) +#define F_DELAY_LINE_CTL_OVERRIDE V_DELAY_LINE_CTL_OVERRIDE(1U) + +#define 
S_VCC_REG_PD 0 +#define V_VCC_REG_PD(x) ((x) << S_VCC_REG_PD) +#define F_VCC_REG_PD V_VCC_REG_PD(1U) + #define A_MC_ADR_DDRPHY_ADR_BIT_ENABLE 0x45000 #define S_BIT_ENABLE_0_11 4 @@ -40884,6 +60207,40 @@ #define V_ADR_LANE_12_15_PD(x) ((x) << S_ADR_LANE_12_15_PD) #define G_ADR_LANE_12_15_PD(x) (((x) >> S_ADR_LANE_12_15_PD) & M_ADR_LANE_12_15_PD) +#define A_T6_MC_ADR_DDRPHY_ADR_BIT_ENABLE 0x45800 +#define A_T6_MC_ADR_DDRPHY_ADR_DIFFPAIR_ENABLE 0x45804 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY0 0x45810 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY1 0x45814 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY2 0x45818 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY3 0x4581c +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY4 0x45820 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY5 0x45824 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY6 0x45828 +#define A_T6_MC_ADR_DDRPHY_ADR_DELAY7 0x4582c +#define A_T6_MC_ADR_DDRPHY_ADR_DFT_WRAP_STATUS_CONTROL 0x45830 + +#define S_ADR_TEST_MODE 5 +#define M_ADR_TEST_MODE 0x3U +#define V_ADR_TEST_MODE(x) ((x) << S_ADR_TEST_MODE) +#define G_ADR_TEST_MODE(x) (((x) >> S_ADR_TEST_MODE) & M_ADR_TEST_MODE) + +#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN0 0x45840 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN1 0x45844 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN2 0x45848 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_NFET_SLICE_EN3 0x4584c +#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN0 0x45850 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN1 0x45854 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN2 0x45858 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_PFET_SLICE_EN3 0x4585c +#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE 0x45860 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE 0x45868 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP0 0x45880 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_FET_SLICE_EN_MAP1 0x45884 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP0 0x458a0 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_POST_CURSOR_VALUE_MAP1 0x458a4 +#define A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP0 0x458a8 +#define 
A_T6_MC_ADR_DDRPHY_ADR_IO_SLEW_CTL_VALUE_MAP1 0x458ac +#define A_T6_MC_ADR_DDRPHY_ADR_POWERDOWN_2 0x458b0 #define A_MC_DDRPHY_ADR_PLL_VREG_CONFIG_0 0x460c0 #define S_PLL_TUNE_0_2 13 @@ -40911,6 +60268,7 @@ #define V_PLL_PLLXTR_0_1(x) ((x) << S_PLL_PLLXTR_0_1) #define G_PLL_PLLXTR_0_1(x) (((x) >> S_PLL_PLLXTR_0_1) & M_PLL_PLLXTR_0_1) +#define A_MC_DDRPHY_AD32S_PLL_VREG_CONFIG_0 0x460c0 #define A_MC_DDRPHY_ADR_PLL_VREG_CONFIG_1 0x460c4 #define S_PLL_TUNETDIV_0_2 13 @@ -40950,6 +60308,7 @@ #define V_ANALOG_WRAPON(x) ((x) << S_ANALOG_WRAPON) #define F_ANALOG_WRAPON V_ANALOG_WRAPON(1U) +#define A_MC_DDRPHY_AD32S_PLL_VREG_CONFIG_1 0x460c4 #define A_MC_DDRPHY_ADR_SYSCLK_CNTL_PR 0x460c8 #define S_SYSCLK_ENABLE 15 @@ -40990,6 +60349,7 @@ #define V_CE0DLTVCC(x) ((x) << S_CE0DLTVCC) #define G_CE0DLTVCC(x) (((x) >> S_CE0DLTVCC) & M_CE0DLTVCC) +#define A_MC_DDRPHY_AD32S_SYSCLK_CNTL_PR 0x460c8 #define A_MC_DDRPHY_ADR_MCCLK_WRCLK_PR_STATIC_OFFSET 0x460cc #define S_TSYS_WRCLK 8 @@ -40997,6 +60357,7 @@ #define V_TSYS_WRCLK(x) ((x) << S_TSYS_WRCLK) #define G_TSYS_WRCLK(x) (((x) >> S_TSYS_WRCLK) & M_TSYS_WRCLK) +#define A_MC_DDRPHY_AD32S_MCCLK_WRCLK_PR_STATIC_OFFSET 0x460cc #define A_MC_DDRPHY_ADR_SYSCLK_PR_VALUE_RO 0x460d0 #define S_SLEW_LATE_SAMPLE 15 @@ -41026,6 +60387,7 @@ #define V_SLEW_CNTL(x) ((x) << S_SLEW_CNTL) #define G_SLEW_CNTL(x) (((x) >> S_SLEW_CNTL) & M_SLEW_CNTL) +#define A_MC_DDRPHY_AD32S_SYSCLK_PR_VALUE_RO 0x460d0 #define A_MC_DDRPHY_ADR_GMTEST_ATEST_CNTL 0x460d4 #define S_FLUSH 15 @@ -41066,6 +60428,22 @@ #define V_ATEST1CTL3(x) ((x) << S_ATEST1CTL3) #define F_ATEST1CTL3 V_ATEST1CTL3(1U) +#define A_MC_DDRPHY_AD32S_OUTPUT_FORCE_ATEST_CNTL 0x460d4 + +#define S_FORCE_EN 14 +#define V_FORCE_EN(x) ((x) << S_FORCE_EN) +#define F_FORCE_EN V_FORCE_EN(1U) + +#define S_AD32S_HS_PROBE_A_SEL 8 +#define M_AD32S_HS_PROBE_A_SEL 0xfU +#define V_AD32S_HS_PROBE_A_SEL(x) ((x) << S_AD32S_HS_PROBE_A_SEL) +#define G_AD32S_HS_PROBE_A_SEL(x) (((x) >> S_AD32S_HS_PROBE_A_SEL) & 
M_AD32S_HS_PROBE_A_SEL) + +#define S_AD32S_HS_PROBE_B_SEL 4 +#define M_AD32S_HS_PROBE_B_SEL 0xfU +#define V_AD32S_HS_PROBE_B_SEL(x) ((x) << S_AD32S_HS_PROBE_B_SEL) +#define G_AD32S_HS_PROBE_B_SEL(x) (((x) >> S_AD32S_HS_PROBE_B_SEL) & M_AD32S_HS_PROBE_B_SEL) + #define A_MC_DDRPHY_ADR_GIANT_MUX_RESULTS_A0 0x460d8 #define S_GIANT_MUX_TEST_RESULTS 0 @@ -41073,7 +60451,15 @@ #define V_GIANT_MUX_TEST_RESULTS(x) ((x) << S_GIANT_MUX_TEST_RESULTS) #define G_GIANT_MUX_TEST_RESULTS(x) (((x) >> S_GIANT_MUX_TEST_RESULTS) & M_GIANT_MUX_TEST_RESULTS) +#define A_MC_DDRPHY_AD32S_OUTPUT_DRIVER_FORCE_VALUE0 0x460d8 + +#define S_OUTPUT_DRIVER_FORCE_VALUE 0 +#define M_OUTPUT_DRIVER_FORCE_VALUE 0xffffU +#define V_OUTPUT_DRIVER_FORCE_VALUE(x) ((x) << S_OUTPUT_DRIVER_FORCE_VALUE) +#define G_OUTPUT_DRIVER_FORCE_VALUE(x) (((x) >> S_OUTPUT_DRIVER_FORCE_VALUE) & M_OUTPUT_DRIVER_FORCE_VALUE) + #define A_MC_DDRPHY_ADR_GIANT_MUX_RESULTS_A1 0x460dc +#define A_MC_DDRPHY_AD32S_OUTPUT_DRIVER_FORCE_VALUE1 0x460dc #define A_MC_DDRPHY_ADR_POWERDOWN_1 0x460e0 #define S_MASTER_PD_CNTL 15 @@ -41109,6 +60495,7 @@ #define V_DVCC_REG_PD(x) ((x) << S_DVCC_REG_PD) #define F_DVCC_REG_PD V_DVCC_REG_PD(1U) +#define A_MC_DDRPHY_AD32S_POWERDOWN_1 0x460e0 #define A_MC_DDRPHY_ADR_SLEW_CAL_CNTL 0x460e4 #define S_SLEW_CAL_ENABLE 15 @@ -41133,6 +60520,7 @@ #define V_SLEW_TARGET_PR_OFFSET(x) ((x) << S_SLEW_TARGET_PR_OFFSET) #define G_SLEW_TARGET_PR_OFFSET(x) (((x) >> S_SLEW_TARGET_PR_OFFSET) & M_SLEW_TARGET_PR_OFFSET) +#define A_MC_DDRPHY_AD32S_SLEW_CAL_CNTL 0x460e4 #define A_MC_DDRPHY_PC_DP18_PLL_LOCK_STATUS 0x47000 #define S_DP18_PLL_LOCK 1 @@ -41285,18 +60673,18 @@ #define V_PER_ENA_SYSCLK_ALIGN(x) ((x) << S_PER_ENA_SYSCLK_ALIGN) #define F_PER_ENA_SYSCLK_ALIGN V_PER_ENA_SYSCLK_ALIGN(1U) -#define S_ENA_PER_RDCLK_ALIGN 9 +#define S_ENA_PER_READ_CTR 9 +#define V_ENA_PER_READ_CTR(x) ((x) << S_ENA_PER_READ_CTR) +#define F_ENA_PER_READ_CTR V_ENA_PER_READ_CTR(1U) + +#define S_ENA_PER_RDCLK_ALIGN 8 #define 
V_ENA_PER_RDCLK_ALIGN(x) ((x) << S_ENA_PER_RDCLK_ALIGN) #define F_ENA_PER_RDCLK_ALIGN V_ENA_PER_RDCLK_ALIGN(1U) -#define S_ENA_PER_DQS_ALIGN 8 +#define S_ENA_PER_DQS_ALIGN 7 #define V_ENA_PER_DQS_ALIGN(x) ((x) << S_ENA_PER_DQS_ALIGN) #define F_ENA_PER_DQS_ALIGN V_ENA_PER_DQS_ALIGN(1U) -#define S_ENA_PER_READ_CTR 7 -#define V_ENA_PER_READ_CTR(x) ((x) << S_ENA_PER_READ_CTR) -#define F_ENA_PER_READ_CTR V_ENA_PER_READ_CTR(1U) - #define S_PER_NEXT_RANK_PAIR 5 #define M_PER_NEXT_RANK_PAIR 0x3U #define V_PER_NEXT_RANK_PAIR(x) ((x) << S_PER_NEXT_RANK_PAIR) @@ -41314,6 +60702,14 @@ #define V_START_PER_CAL(x) ((x) << S_START_PER_CAL) #define F_START_PER_CAL V_START_PER_CAL(1U) +#define S_ABORT_ON_ERR_EN 1 +#define V_ABORT_ON_ERR_EN(x) ((x) << S_ABORT_ON_ERR_EN) +#define F_ABORT_ON_ERR_EN V_ABORT_ON_ERR_EN(1U) + +#define S_ENA_PER_RD_CTR 9 +#define V_ENA_PER_RD_CTR(x) ((x) << S_ENA_PER_RD_CTR) +#define F_ENA_PER_RD_CTR V_ENA_PER_RD_CTR(1U) + #define A_MC_DDRPHY_PC_CONFIG0 0x47030 #define S_PROTOCOL_DDR 12 @@ -41354,6 +60750,19 @@ #define V_DDR4_VLEVEL_BANK_GROUP(x) ((x) << S_DDR4_VLEVEL_BANK_GROUP) #define F_DDR4_VLEVEL_BANK_GROUP V_DDR4_VLEVEL_BANK_GROUP(1U) +#define S_DDRPHY_PROTOCOL 12 +#define M_DDRPHY_PROTOCOL 0xfU +#define V_DDRPHY_PROTOCOL(x) ((x) << S_DDRPHY_PROTOCOL) +#define G_DDRPHY_PROTOCOL(x) (((x) >> S_DDRPHY_PROTOCOL) & M_DDRPHY_PROTOCOL) + +#define S_SPAM_EN 10 +#define V_SPAM_EN(x) ((x) << S_SPAM_EN) +#define F_SPAM_EN V_SPAM_EN(1U) + +#define S_DDR4_IPW_LOOP_DIS 2 +#define V_DDR4_IPW_LOOP_DIS(x) ((x) << S_DDR4_IPW_LOOP_DIS) +#define F_DDR4_IPW_LOOP_DIS V_DDR4_IPW_LOOP_DIS(1U) + #define A_MC_DDRPHY_PC_CONFIG1 0x47034 #define S_WRITE_LATENCY_OFFSET 12 @@ -41378,6 +60787,19 @@ #define V_DISABLE_MEMCTL_CAL(x) ((x) << S_DISABLE_MEMCTL_CAL) #define F_DISABLE_MEMCTL_CAL V_DISABLE_MEMCTL_CAL(1U) +#define S_MEMCTL_CIS_IGNORE 6 +#define V_MEMCTL_CIS_IGNORE(x) ((x) << S_MEMCTL_CIS_IGNORE) +#define F_MEMCTL_CIS_IGNORE V_MEMCTL_CIS_IGNORE(1U) + +#define S_MEMORY_TYPE 2 
+#define M_MEMORY_TYPE 0x7U +#define V_MEMORY_TYPE(x) ((x) << S_MEMORY_TYPE) +#define G_MEMORY_TYPE(x) (((x) >> S_MEMORY_TYPE) & M_MEMORY_TYPE) + +#define S_DDR4_PDA_MODE 1 +#define V_DDR4_PDA_MODE(x) ((x) << S_DDR4_PDA_MODE) +#define F_DDR4_PDA_MODE V_DDR4_PDA_MODE(1U) + #define A_MC_DDRPHY_PC_RESETS 0x47038 #define S_PLL_RESET 15 @@ -41443,6 +60865,30 @@ #define V_RANK_GROUPING(x) ((x) << S_RANK_GROUPING) #define G_RANK_GROUPING(x) (((x) >> S_RANK_GROUPING) & M_RANK_GROUPING) +#define S_ADDR_MIRROR_A3_A4 5 +#define V_ADDR_MIRROR_A3_A4(x) ((x) << S_ADDR_MIRROR_A3_A4) +#define F_ADDR_MIRROR_A3_A4 V_ADDR_MIRROR_A3_A4(1U) + +#define S_ADDR_MIRROR_A5_A6 4 +#define V_ADDR_MIRROR_A5_A6(x) ((x) << S_ADDR_MIRROR_A5_A6) +#define F_ADDR_MIRROR_A5_A6 V_ADDR_MIRROR_A5_A6(1U) + +#define S_ADDR_MIRROR_A7_A8 3 +#define V_ADDR_MIRROR_A7_A8(x) ((x) << S_ADDR_MIRROR_A7_A8) +#define F_ADDR_MIRROR_A7_A8 V_ADDR_MIRROR_A7_A8(1U) + +#define S_ADDR_MIRROR_A11_A13 2 +#define V_ADDR_MIRROR_A11_A13(x) ((x) << S_ADDR_MIRROR_A11_A13) +#define F_ADDR_MIRROR_A11_A13 V_ADDR_MIRROR_A11_A13(1U) + +#define S_ADDR_MIRROR_BA0_BA1 1 +#define V_ADDR_MIRROR_BA0_BA1(x) ((x) << S_ADDR_MIRROR_BA0_BA1) +#define F_ADDR_MIRROR_BA0_BA1 V_ADDR_MIRROR_BA0_BA1(1U) + +#define S_ADDR_MIRROR_BG0_BG1 0 +#define V_ADDR_MIRROR_BG0_BG1(x) ((x) << S_ADDR_MIRROR_BG0_BG1) +#define F_ADDR_MIRROR_BG0_BG1 V_ADDR_MIRROR_BG0_BG1(1U) + #define A_MC_DDRPHY_PC_ERROR_STATUS0 0x47048 #define S_RC_ERROR 15 @@ -41535,6 +60981,19 @@ #define V_VREFDQ1D(x) ((x) << S_VREFDQ1D) #define G_VREFDQ1D(x) (((x) >> S_VREFDQ1D) & M_VREFDQ1D) +#define S_EN_ANALOG_PD 3 +#define V_EN_ANALOG_PD(x) ((x) << S_EN_ANALOG_PD) +#define F_EN_ANALOG_PD V_EN_ANALOG_PD(1U) + +#define S_ANALOG_PD_DLY 2 +#define V_ANALOG_PD_DLY(x) ((x) << S_ANALOG_PD_DLY) +#define F_ANALOG_PD_DLY V_ANALOG_PD_DLY(1U) + +#define S_ANALOG_PD_DIV 0 +#define M_ANALOG_PD_DIV 0x3U +#define V_ANALOG_PD_DIV(x) ((x) << S_ANALOG_PD_DIV) +#define G_ANALOG_PD_DIV(x) (((x) >> S_ANALOG_PD_DIV) 
& M_ANALOG_PD_DIV) + #define A_MC_DDRPHY_PC_INIT_CAL_CONFIG0 0x47058 #define S_ENA_WR_LEVEL 15 @@ -41669,6 +61128,10 @@ #define V_INIT_CAL_COMPLETE(x) ((x) << S_INIT_CAL_COMPLETE) #define G_INIT_CAL_COMPLETE(x) (((x) >> S_INIT_CAL_COMPLETE) & M_INIT_CAL_COMPLETE) +#define S_PER_CAL_ABORT 6 +#define V_PER_CAL_ABORT(x) ((x) << S_PER_CAL_ABORT) +#define F_PER_CAL_ABORT V_PER_CAL_ABORT(1U) + #define A_MC_DDRPHY_PC_INIT_CAL_MASK 0x47068 #define S_ERROR_WR_LEVEL_MASK 15 @@ -41777,6 +61240,34 @@ #define V_MR_MASK_EN(x) ((x) << S_MR_MASK_EN) #define G_MR_MASK_EN(x) (((x) >> S_MR_MASK_EN) & M_MR_MASK_EN) +#define S_PARITY_DLY 9 +#define V_PARITY_DLY(x) ((x) << S_PARITY_DLY) +#define F_PARITY_DLY V_PARITY_DLY(1U) + +#define S_FORCE_RESERVED 7 +#define V_FORCE_RESERVED(x) ((x) << S_FORCE_RESERVED) +#define F_FORCE_RESERVED V_FORCE_RESERVED(1U) + +#define S_HALT_ROTATION 6 +#define V_HALT_ROTATION(x) ((x) << S_HALT_ROTATION) +#define F_HALT_ROTATION V_HALT_ROTATION(1U) + +#define S_FORCE_MPR 5 +#define V_FORCE_MPR(x) ((x) << S_FORCE_MPR) +#define F_FORCE_MPR V_FORCE_MPR(1U) + +#define S_IPW_SIDEAB_SEL 2 +#define V_IPW_SIDEAB_SEL(x) ((x) << S_IPW_SIDEAB_SEL) +#define F_IPW_SIDEAB_SEL V_IPW_SIDEAB_SEL(1U) + +#define S_PARITY_A17_MASK 1 +#define V_PARITY_A17_MASK(x) ((x) << S_PARITY_A17_MASK) +#define F_PARITY_A17_MASK V_PARITY_A17_MASK(1U) + +#define S_X16_DEVICE 0 +#define V_X16_DEVICE(x) ((x) << S_X16_DEVICE) +#define F_X16_DEVICE V_X16_DEVICE(1U) + #define A_MC_DDRPHY_SEQ_RESERVED_ADDR0 0x4720c #define A_MC_DDRPHY_SEQ_RESERVED_ADDR1 0x47210 #define A_MC_DDRPHY_SEQ_RESERVED_ADDR2 0x47214 @@ -41921,6 +61412,11 @@ #define V_TMRSC_CYCLES(x) ((x) << S_TMRSC_CYCLES) #define G_TMRSC_CYCLES(x) (((x) >> S_TMRSC_CYCLES) & M_TMRSC_CYCLES) +#define S_MRS_CMD_SPACE 0 +#define M_MRS_CMD_SPACE 0xfU +#define V_MRS_CMD_SPACE(x) ((x) << S_MRS_CMD_SPACE) +#define G_MRS_CMD_SPACE(x) (((x) >> S_MRS_CMD_SPACE) & M_MRS_CMD_SPACE) + #define A_MC_DDRPHY_RC_CONFIG0 0x47400 #define S_GLOBAL_PHY_OFFSET 
12 @@ -41960,6 +61456,10 @@ #define V_STAGGERED_PATTERN(x) ((x) << S_STAGGERED_PATTERN) #define F_STAGGERED_PATTERN V_STAGGERED_PATTERN(1U) +#define S_ERS_MODE 10 +#define V_ERS_MODE(x) ((x) << S_ERS_MODE) +#define F_ERS_MODE V_ERS_MODE(1U) + #define A_MC_DDRPHY_RC_CONFIG1 0x47404 #define S_OUTER_LOOP_CNT 2 @@ -41983,6 +61483,10 @@ #define V_ALLOW_RD_FIFO_AUTO_R_ESET(x) ((x) << S_ALLOW_RD_FIFO_AUTO_R_ESET) #define F_ALLOW_RD_FIFO_AUTO_R_ESET V_ALLOW_RD_FIFO_AUTO_R_ESET(1U) +#define S_DIS_LOW_PWR_PER_CAL 3 +#define V_DIS_LOW_PWR_PER_CAL(x) ((x) << S_DIS_LOW_PWR_PER_CAL) +#define F_DIS_LOW_PWR_PER_CAL V_DIS_LOW_PWR_PER_CAL(1U) + #define A_MC_DDRPHY_RC_ERROR_STATUS0 0x47414 #define S_RD_CNTL_ERROR 15 @@ -42067,6 +61571,15 @@ #define V_FW_RD_WR(x) ((x) << S_FW_RD_WR) #define G_FW_RD_WR(x) (((x) >> S_FW_RD_WR) & M_FW_RD_WR) +#define S_EN_RESET_WR_DELAY_WL 0 +#define V_EN_RESET_WR_DELAY_WL(x) ((x) << S_EN_RESET_WR_DELAY_WL) +#define F_EN_RESET_WR_DELAY_WL V_EN_RESET_WR_DELAY_WL(1U) + +#define S_TWR_MPR 2 +#define M_TWR_MPR 0xfU +#define V_TWR_MPR(x) ((x) << S_TWR_MPR) +#define G_TWR_MPR(x) (((x) >> S_TWR_MPR) & M_TWR_MPR) + #define A_MC_DDRPHY_WC_ERROR_STATUS0 0x4760c #define S_WR_CNTL_ERROR 15 @@ -42251,6 +61764,69 @@ #define V_ATEST_CNTL(x) ((x) << S_ATEST_CNTL) #define G_ATEST_CNTL(x) (((x) >> S_ATEST_CNTL) & M_ATEST_CNTL) +#define A_MC_DDRPHY_APB_MTCTL_REG0 0x47820 + +#define S_MT_DATA_MUX4_1MODE 15 +#define V_MT_DATA_MUX4_1MODE(x) ((x) << S_MT_DATA_MUX4_1MODE) +#define F_MT_DATA_MUX4_1MODE V_MT_DATA_MUX4_1MODE(1U) + +#define S_MT_PLL_RESET 14 +#define V_MT_PLL_RESET(x) ((x) << S_MT_PLL_RESET) +#define F_MT_PLL_RESET V_MT_PLL_RESET(1U) + +#define S_MT_SYSCLK_RESET 13 +#define V_MT_SYSCLK_RESET(x) ((x) << S_MT_SYSCLK_RESET) +#define F_MT_SYSCLK_RESET V_MT_SYSCLK_RESET(1U) + +#define S_MT_GLOBAL_PHY_OFFSET 9 +#define M_MT_GLOBAL_PHY_OFFSET 0xfU +#define V_MT_GLOBAL_PHY_OFFSET(x) ((x) << S_MT_GLOBAL_PHY_OFFSET) +#define G_MT_GLOBAL_PHY_OFFSET(x) (((x) >> 
S_MT_GLOBAL_PHY_OFFSET) & M_MT_GLOBAL_PHY_OFFSET) + +#define S_MT_DQ_SEL_QUAD 7 +#define M_MT_DQ_SEL_QUAD 0x3U +#define V_MT_DQ_SEL_QUAD(x) ((x) << S_MT_DQ_SEL_QUAD) +#define G_MT_DQ_SEL_QUAD(x) (((x) >> S_MT_DQ_SEL_QUAD) & M_MT_DQ_SEL_QUAD) + +#define S_MT_PERFORM_RDCLK_ALIGN 6 +#define V_MT_PERFORM_RDCLK_ALIGN(x) ((x) << S_MT_PERFORM_RDCLK_ALIGN) +#define F_MT_PERFORM_RDCLK_ALIGN V_MT_PERFORM_RDCLK_ALIGN(1U) + +#define S_MT_ALIGN_ON_EVEN_CYCLES 5 +#define V_MT_ALIGN_ON_EVEN_CYCLES(x) ((x) << S_MT_ALIGN_ON_EVEN_CYCLES) +#define F_MT_ALIGN_ON_EVEN_CYCLES V_MT_ALIGN_ON_EVEN_CYCLES(1U) + +#define S_MT_WRCLK_CAL_START 4 +#define V_MT_WRCLK_CAL_START(x) ((x) << S_MT_WRCLK_CAL_START) +#define F_MT_WRCLK_CAL_START V_MT_WRCLK_CAL_START(1U) + +#define A_MC_DDRPHY_APB_MTCTL_REG1 0x47824 + +#define S_MT_WPRD_ENABLE 15 +#define V_MT_WPRD_ENABLE(x) ((x) << S_MT_WPRD_ENABLE) +#define F_MT_WPRD_ENABLE V_MT_WPRD_ENABLE(1U) + +#define S_MT_PVTP 10 +#define M_MT_PVTP 0x1fU +#define V_MT_PVTP(x) ((x) << S_MT_PVTP) +#define G_MT_PVTP(x) (((x) >> S_MT_PVTP) & M_MT_PVTP) + +#define S_MT_PVTN 5 +#define M_MT_PVTN 0x1fU +#define V_MT_PVTN(x) ((x) << S_MT_PVTN) +#define G_MT_PVTN(x) (((x) >> S_MT_PVTN) & M_MT_PVTN) + +#define A_MC_DDRPHY_APB_MTSTAT_REG0 0x47828 +#define A_MC_DDRPHY_APB_MTSTAT_REG1 0x4782c + +#define S_MT_ADR32_PLL_LOCK_SUM 1 +#define V_MT_ADR32_PLL_LOCK_SUM(x) ((x) << S_MT_ADR32_PLL_LOCK_SUM) +#define F_MT_ADR32_PLL_LOCK_SUM V_MT_ADR32_PLL_LOCK_SUM(1U) + +#define S_MT_DP18_PLL_LOCK_SUM 0 +#define V_MT_DP18_PLL_LOCK_SUM(x) ((x) << S_MT_DP18_PLL_LOCK_SUM) +#define F_MT_DP18_PLL_LOCK_SUM V_MT_DP18_PLL_LOCK_SUM(1U) + /* registers for module MC_1 */ #define MC_1_BASE_ADDR 0x48000 @@ -42289,6 +61865,19 @@ #define A_EDC_H_INT_ENABLE 0x50074 #define A_EDC_H_INT_CAUSE 0x50078 + +#define S_ECC_UE_INT0_CAUSE 5 +#define V_ECC_UE_INT0_CAUSE(x) ((x) << S_ECC_UE_INT0_CAUSE) +#define F_ECC_UE_INT0_CAUSE V_ECC_UE_INT0_CAUSE(1U) + +#define S_ECC_CE_INT0_CAUSE 4 +#define 
V_ECC_CE_INT0_CAUSE(x) ((x) << S_ECC_CE_INT0_CAUSE) +#define F_ECC_CE_INT0_CAUSE V_ECC_CE_INT0_CAUSE(1U) + +#define S_PERR_INT0_CAUSE 3 +#define V_PERR_INT0_CAUSE(x) ((x) << S_PERR_INT0_CAUSE) +#define F_PERR_INT0_CAUSE V_PERR_INT0_CAUSE(1U) + #define A_EDC_H_ECC_STATUS 0x5007c #define A_EDC_H_ECC_ERR_SEL 0x50080 @@ -42378,3 +61967,936 @@ #define A_HMA_PAR_ENABLE 0x51300 #define A_HMA_INT_ENABLE 0x51304 #define A_HMA_INT_CAUSE 0x51308 + +/* registers for module EDC_T60 */ +#define EDC_T60_BASE_ADDR 0x50000 + +#define S_QDR_CLKPHASE 24 +#define M_QDR_CLKPHASE 0x7U +#define V_QDR_CLKPHASE(x) ((x) << S_QDR_CLKPHASE) +#define G_QDR_CLKPHASE(x) (((x) >> S_QDR_CLKPHASE) & M_QDR_CLKPHASE) + +#define S_MAXOPSPERTRC 21 +#define M_MAXOPSPERTRC 0x7U +#define V_MAXOPSPERTRC(x) ((x) << S_MAXOPSPERTRC) +#define G_MAXOPSPERTRC(x) (((x) >> S_MAXOPSPERTRC) & M_MAXOPSPERTRC) + +#define S_NUMPIPESTAGES 19 +#define M_NUMPIPESTAGES 0x3U +#define V_NUMPIPESTAGES(x) ((x) << S_NUMPIPESTAGES) +#define G_NUMPIPESTAGES(x) (((x) >> S_NUMPIPESTAGES) & M_NUMPIPESTAGES) + +#define A_EDC_H_DBG_MA_CMD_INTF 0x50300 + +#define S_MCMDADDR 12 +#define M_MCMDADDR 0xfffffU +#define V_MCMDADDR(x) ((x) << S_MCMDADDR) +#define G_MCMDADDR(x) (((x) >> S_MCMDADDR) & M_MCMDADDR) + +#define S_MCMDLEN 5 +#define M_MCMDLEN 0x7fU +#define V_MCMDLEN(x) ((x) << S_MCMDLEN) +#define G_MCMDLEN(x) (((x) >> S_MCMDLEN) & M_MCMDLEN) + +#define S_MCMDNRE 4 +#define V_MCMDNRE(x) ((x) << S_MCMDNRE) +#define F_MCMDNRE V_MCMDNRE(1U) + +#define S_MCMDNRB 3 +#define V_MCMDNRB(x) ((x) << S_MCMDNRB) +#define F_MCMDNRB V_MCMDNRB(1U) + +#define S_MCMDWR 2 +#define V_MCMDWR(x) ((x) << S_MCMDWR) +#define F_MCMDWR V_MCMDWR(1U) + +#define S_MCMDRDY 1 +#define V_MCMDRDY(x) ((x) << S_MCMDRDY) +#define F_MCMDRDY V_MCMDRDY(1U) + +#define S_MCMDVLD 0 +#define V_MCMDVLD(x) ((x) << S_MCMDVLD) +#define F_MCMDVLD V_MCMDVLD(1U) + +#define A_EDC_H_DBG_MA_WDATA_INTF 0x50304 + +#define S_MWDATAVLD 31 +#define V_MWDATAVLD(x) ((x) << S_MWDATAVLD) 
+#define F_MWDATAVLD V_MWDATAVLD(1U) + +#define S_MWDATARDY 30 +#define V_MWDATARDY(x) ((x) << S_MWDATARDY) +#define F_MWDATARDY V_MWDATARDY(1U) + +#define S_MWDATA 0 +#define M_MWDATA 0x3fffffffU +#define V_MWDATA(x) ((x) << S_MWDATA) +#define G_MWDATA(x) (((x) >> S_MWDATA) & M_MWDATA) + +#define A_EDC_H_DBG_MA_RDATA_INTF 0x50308 + +#define S_MRSPVLD 31 +#define V_MRSPVLD(x) ((x) << S_MRSPVLD) +#define F_MRSPVLD V_MRSPVLD(1U) + +#define S_MRSPRDY 30 +#define V_MRSPRDY(x) ((x) << S_MRSPRDY) +#define F_MRSPRDY V_MRSPRDY(1U) + +#define S_MRSPDATA 0 +#define M_MRSPDATA 0x3fffffffU +#define V_MRSPDATA(x) ((x) << S_MRSPDATA) +#define G_MRSPDATA(x) (((x) >> S_MRSPDATA) & M_MRSPDATA) + +#define A_EDC_H_DBG_BIST_CMD_INTF 0x5030c + +#define S_BCMDADDR 9 +#define M_BCMDADDR 0x7fffffU +#define V_BCMDADDR(x) ((x) << S_BCMDADDR) +#define G_BCMDADDR(x) (((x) >> S_BCMDADDR) & M_BCMDADDR) + +#define S_BCMDLEN 3 +#define M_BCMDLEN 0x3fU +#define V_BCMDLEN(x) ((x) << S_BCMDLEN) +#define G_BCMDLEN(x) (((x) >> S_BCMDLEN) & M_BCMDLEN) + +#define S_BCMDWR 2 +#define V_BCMDWR(x) ((x) << S_BCMDWR) +#define F_BCMDWR V_BCMDWR(1U) + +#define S_BCMDRDY 1 +#define V_BCMDRDY(x) ((x) << S_BCMDRDY) +#define F_BCMDRDY V_BCMDRDY(1U) + +#define S_BCMDVLD 0 +#define V_BCMDVLD(x) ((x) << S_BCMDVLD) +#define F_BCMDVLD V_BCMDVLD(1U) + +#define A_EDC_H_DBG_BIST_WDATA_INTF 0x50310 + +#define S_BWDATAVLD 31 +#define V_BWDATAVLD(x) ((x) << S_BWDATAVLD) +#define F_BWDATAVLD V_BWDATAVLD(1U) + +#define S_BWDATARDY 30 +#define V_BWDATARDY(x) ((x) << S_BWDATARDY) +#define F_BWDATARDY V_BWDATARDY(1U) + +#define S_BWDATA 0 +#define M_BWDATA 0x3fffffffU +#define V_BWDATA(x) ((x) << S_BWDATA) +#define G_BWDATA(x) (((x) >> S_BWDATA) & M_BWDATA) + +#define A_EDC_H_DBG_BIST_RDATA_INTF 0x50314 + +#define S_BRSPVLD 31 +#define V_BRSPVLD(x) ((x) << S_BRSPVLD) +#define F_BRSPVLD V_BRSPVLD(1U) + +#define S_BRSPRDY 30 +#define V_BRSPRDY(x) ((x) << S_BRSPRDY) +#define F_BRSPRDY V_BRSPRDY(1U) + +#define S_BRSPDATA 0 +#define 
M_BRSPDATA 0x3fffffffU +#define V_BRSPDATA(x) ((x) << S_BRSPDATA) +#define G_BRSPDATA(x) (((x) >> S_BRSPDATA) & M_BRSPDATA) + +#define A_EDC_H_DBG_EDRAM_CMD_INTF 0x50318 + +#define S_EDRAMADDR 16 +#define M_EDRAMADDR 0xffffU +#define V_EDRAMADDR(x) ((x) << S_EDRAMADDR) +#define G_EDRAMADDR(x) (((x) >> S_EDRAMADDR) & M_EDRAMADDR) + +#define S_EDRAMDWSN 8 +#define M_EDRAMDWSN 0xffU +#define V_EDRAMDWSN(x) ((x) << S_EDRAMDWSN) +#define G_EDRAMDWSN(x) (((x) >> S_EDRAMDWSN) & M_EDRAMDWSN) + +#define S_EDRAMCRA 5 +#define M_EDRAMCRA 0x7U +#define V_EDRAMCRA(x) ((x) << S_EDRAMCRA) +#define G_EDRAMCRA(x) (((x) >> S_EDRAMCRA) & M_EDRAMCRA) + +#define S_EDRAMREFENLO 4 +#define V_EDRAMREFENLO(x) ((x) << S_EDRAMREFENLO) +#define F_EDRAMREFENLO V_EDRAMREFENLO(1U) + +#define S_EDRAM1WRENLO 3 +#define V_EDRAM1WRENLO(x) ((x) << S_EDRAM1WRENLO) +#define F_EDRAM1WRENLO V_EDRAM1WRENLO(1U) + +#define S_EDRAM1RDENLO 2 +#define V_EDRAM1RDENLO(x) ((x) << S_EDRAM1RDENLO) +#define F_EDRAM1RDENLO V_EDRAM1RDENLO(1U) + +#define S_EDRAM0WRENLO 1 +#define V_EDRAM0WRENLO(x) ((x) << S_EDRAM0WRENLO) +#define F_EDRAM0WRENLO V_EDRAM0WRENLO(1U) + +#define S_EDRAM0RDENLO 0 +#define V_EDRAM0RDENLO(x) ((x) << S_EDRAM0RDENLO) +#define F_EDRAM0RDENLO V_EDRAM0RDENLO(1U) + +#define A_EDC_H_DBG_EDRAM_WDATA_INTF 0x5031c + +#define S_EDRAMWDATA 9 +#define M_EDRAMWDATA 0x7fffffU +#define V_EDRAMWDATA(x) ((x) << S_EDRAMWDATA) +#define G_EDRAMWDATA(x) (((x) >> S_EDRAMWDATA) & M_EDRAMWDATA) + +#define S_EDRAMWBYTEEN 0 +#define M_EDRAMWBYTEEN 0x1ffU +#define V_EDRAMWBYTEEN(x) ((x) << S_EDRAMWBYTEEN) +#define G_EDRAMWBYTEEN(x) (((x) >> S_EDRAMWBYTEEN) & M_EDRAMWBYTEEN) + +#define A_EDC_H_DBG_EDRAM0_RDATA_INTF 0x50320 +#define A_EDC_H_DBG_EDRAM1_RDATA_INTF 0x50324 +#define A_EDC_H_DBG_MA_WR_REQ_CNT 0x50328 +#define A_EDC_H_DBG_MA_WR_EXP_DAT_CYC_CNT 0x5032c +#define A_EDC_H_DBG_MA_WR_DAT_CYC_CNT 0x50330 +#define A_EDC_H_DBG_MA_RD_REQ_CNT 0x50334 +#define A_EDC_H_DBG_MA_RD_EXP_DAT_CYC_CNT 0x50338 +#define 
A_EDC_H_DBG_MA_RD_DAT_CYC_CNT 0x5033c +#define A_EDC_H_DBG_BIST_WR_REQ_CNT 0x50340 +#define A_EDC_H_DBG_BIST_WR_EXP_DAT_CYC_CNT 0x50344 +#define A_EDC_H_DBG_BIST_WR_DAT_CYC_CNT 0x50348 +#define A_EDC_H_DBG_BIST_RD_REQ_CNT 0x5034c +#define A_EDC_H_DBG_BIST_RD_EXP_DAT_CYC_CNT 0x50350 +#define A_EDC_H_DBG_BIST_RD_DAT_CYC_CNT 0x50354 +#define A_EDC_H_DBG_EDRAM0_WR_REQ_CNT 0x50358 +#define A_EDC_H_DBG_EDRAM0_RD_REQ_CNT 0x5035c +#define A_EDC_H_DBG_EDRAM0_RMW_CNT 0x50360 +#define A_EDC_H_DBG_EDRAM1_WR_REQ_CNT 0x50364 +#define A_EDC_H_DBG_EDRAM1_RD_REQ_CNT 0x50368 +#define A_EDC_H_DBG_EDRAM1_RMW_CNT 0x5036c +#define A_EDC_H_DBG_EDRAM_REF_BURST_CNT 0x50370 +#define A_EDC_H_DBG_FIFO_STATUS 0x50374 + +#define S_RDTAG_NOTFULL 17 +#define V_RDTAG_NOTFULL(x) ((x) << S_RDTAG_NOTFULL) +#define F_RDTAG_NOTFULL V_RDTAG_NOTFULL(1U) + +#define S_RDTAG_NOTEMPTY 16 +#define V_RDTAG_NOTEMPTY(x) ((x) << S_RDTAG_NOTEMPTY) +#define F_RDTAG_NOTEMPTY V_RDTAG_NOTEMPTY(1U) + +#define S_INP_CMDQ_NOTFULL_ARB 15 +#define V_INP_CMDQ_NOTFULL_ARB(x) ((x) << S_INP_CMDQ_NOTFULL_ARB) +#define F_INP_CMDQ_NOTFULL_ARB V_INP_CMDQ_NOTFULL_ARB(1U) + +#define S_INP_CMDQ_NOTEMPTY 14 +#define V_INP_CMDQ_NOTEMPTY(x) ((x) << S_INP_CMDQ_NOTEMPTY) +#define F_INP_CMDQ_NOTEMPTY V_INP_CMDQ_NOTEMPTY(1U) + +#define S_INP_WRDQ_WRRDY 13 +#define V_INP_WRDQ_WRRDY(x) ((x) << S_INP_WRDQ_WRRDY) +#define F_INP_WRDQ_WRRDY V_INP_WRDQ_WRRDY(1U) + +#define S_INP_WRDQ_NOTEMPTY 12 +#define V_INP_WRDQ_NOTEMPTY(x) ((x) << S_INP_WRDQ_NOTEMPTY) +#define F_INP_WRDQ_NOTEMPTY V_INP_WRDQ_NOTEMPTY(1U) + +#define S_INP_BEQ_WRRDY_OPEN 11 +#define V_INP_BEQ_WRRDY_OPEN(x) ((x) << S_INP_BEQ_WRRDY_OPEN) +#define F_INP_BEQ_WRRDY_OPEN V_INP_BEQ_WRRDY_OPEN(1U) + +#define S_INP_BEQ_NOTEMPTY 10 +#define V_INP_BEQ_NOTEMPTY(x) ((x) << S_INP_BEQ_NOTEMPTY) +#define F_INP_BEQ_NOTEMPTY V_INP_BEQ_NOTEMPTY(1U) + +#define S_RDDQ_NOTFULL_OPEN 9 +#define V_RDDQ_NOTFULL_OPEN(x) ((x) << S_RDDQ_NOTFULL_OPEN) +#define F_RDDQ_NOTFULL_OPEN V_RDDQ_NOTFULL_OPEN(1U) + 
+#define S_RDDQ_RDCNT 4 +#define M_RDDQ_RDCNT 0x1fU +#define V_RDDQ_RDCNT(x) ((x) << S_RDDQ_RDCNT) +#define G_RDDQ_RDCNT(x) (((x) >> S_RDDQ_RDCNT) & M_RDDQ_RDCNT) + +#define S_RDSIDEQ_NOTFULL 3 +#define V_RDSIDEQ_NOTFULL(x) ((x) << S_RDSIDEQ_NOTFULL) +#define F_RDSIDEQ_NOTFULL V_RDSIDEQ_NOTFULL(1U) + +#define S_RDSIDEQ_NOTEMPTY 2 +#define V_RDSIDEQ_NOTEMPTY(x) ((x) << S_RDSIDEQ_NOTEMPTY) +#define F_RDSIDEQ_NOTEMPTY V_RDSIDEQ_NOTEMPTY(1U) + +#define S_STG_CMDQ_NOTEMPTY 1 +#define V_STG_CMDQ_NOTEMPTY(x) ((x) << S_STG_CMDQ_NOTEMPTY) +#define F_STG_CMDQ_NOTEMPTY V_STG_CMDQ_NOTEMPTY(1U) + +#define S_STG_WRDQ_NOTEMPTY 0 +#define V_STG_WRDQ_NOTEMPTY(x) ((x) << S_STG_WRDQ_NOTEMPTY) +#define F_STG_WRDQ_NOTEMPTY V_STG_WRDQ_NOTEMPTY(1U) + +#define A_EDC_H_DBG_FSM_STATE 0x50378 + +#define S_CMDSPLITFSM 3 +#define V_CMDSPLITFSM(x) ((x) << S_CMDSPLITFSM) +#define F_CMDSPLITFSM V_CMDSPLITFSM(1U) + +#define S_CMDFSM 0 +#define M_CMDFSM 0x7U +#define V_CMDFSM(x) ((x) << S_CMDFSM) +#define G_CMDFSM(x) (((x) >> S_CMDFSM) & M_CMDFSM) + +#define A_EDC_H_DBG_STALL_CYCLES 0x5037c + +#define S_STALL_RMW 19 +#define V_STALL_RMW(x) ((x) << S_STALL_RMW) +#define F_STALL_RMW V_STALL_RMW(1U) + +#define S_STALL_EDC_CMD 18 +#define V_STALL_EDC_CMD(x) ((x) << S_STALL_EDC_CMD) +#define F_STALL_EDC_CMD V_STALL_EDC_CMD(1U) + +#define S_DEAD_CYCLE0 17 +#define V_DEAD_CYCLE0(x) ((x) << S_DEAD_CYCLE0) +#define F_DEAD_CYCLE0 V_DEAD_CYCLE0(1U) + +#define S_DEAD_CYCLE1 16 +#define V_DEAD_CYCLE1(x) ((x) << S_DEAD_CYCLE1) +#define F_DEAD_CYCLE1 V_DEAD_CYCLE1(1U) + +#define S_DEAD_CYCLE0_BBI 15 +#define V_DEAD_CYCLE0_BBI(x) ((x) << S_DEAD_CYCLE0_BBI) +#define F_DEAD_CYCLE0_BBI V_DEAD_CYCLE0_BBI(1U) + +#define S_DEAD_CYCLE1_BBI 14 +#define V_DEAD_CYCLE1_BBI(x) ((x) << S_DEAD_CYCLE1_BBI) +#define F_DEAD_CYCLE1_BBI V_DEAD_CYCLE1_BBI(1U) + +#define S_DEAD_CYCLE0_MAX_OP 13 +#define V_DEAD_CYCLE0_MAX_OP(x) ((x) << S_DEAD_CYCLE0_MAX_OP) +#define F_DEAD_CYCLE0_MAX_OP V_DEAD_CYCLE0_MAX_OP(1U) + +#define 
S_DEAD_CYCLE1_MAX_OP 12 +#define V_DEAD_CYCLE1_MAX_OP(x) ((x) << S_DEAD_CYCLE1_MAX_OP) +#define F_DEAD_CYCLE1_MAX_OP V_DEAD_CYCLE1_MAX_OP(1U) + +#define S_DEAD_CYCLE0_PRE_REF 11 +#define V_DEAD_CYCLE0_PRE_REF(x) ((x) << S_DEAD_CYCLE0_PRE_REF) +#define F_DEAD_CYCLE0_PRE_REF V_DEAD_CYCLE0_PRE_REF(1U) + +#define S_DEAD_CYCLE1_PRE_REF 10 +#define V_DEAD_CYCLE1_PRE_REF(x) ((x) << S_DEAD_CYCLE1_PRE_REF) +#define F_DEAD_CYCLE1_PRE_REF V_DEAD_CYCLE1_PRE_REF(1U) + +#define S_DEAD_CYCLE0_POST_REF 9 +#define V_DEAD_CYCLE0_POST_REF(x) ((x) << S_DEAD_CYCLE0_POST_REF) +#define F_DEAD_CYCLE0_POST_REF V_DEAD_CYCLE0_POST_REF(1U) + +#define S_DEAD_CYCLE1_POST_REF 8 +#define V_DEAD_CYCLE1_POST_REF(x) ((x) << S_DEAD_CYCLE1_POST_REF) +#define F_DEAD_CYCLE1_POST_REF V_DEAD_CYCLE1_POST_REF(1U) + +#define S_DEAD_CYCLE0_RMW 7 +#define V_DEAD_CYCLE0_RMW(x) ((x) << S_DEAD_CYCLE0_RMW) +#define F_DEAD_CYCLE0_RMW V_DEAD_CYCLE0_RMW(1U) + +#define S_DEAD_CYCLE1_RMW 6 +#define V_DEAD_CYCLE1_RMW(x) ((x) << S_DEAD_CYCLE1_RMW) +#define F_DEAD_CYCLE1_RMW V_DEAD_CYCLE1_RMW(1U) + +#define S_DEAD_CYCLE0_BBI_RMW 5 +#define V_DEAD_CYCLE0_BBI_RMW(x) ((x) << S_DEAD_CYCLE0_BBI_RMW) +#define F_DEAD_CYCLE0_BBI_RMW V_DEAD_CYCLE0_BBI_RMW(1U) + +#define S_DEAD_CYCLE1_BBI_RMW 4 +#define V_DEAD_CYCLE1_BBI_RMW(x) ((x) << S_DEAD_CYCLE1_BBI_RMW) +#define F_DEAD_CYCLE1_BBI_RMW V_DEAD_CYCLE1_BBI_RMW(1U) + +#define S_DEAD_CYCLE0_PRE_REF_RMW 3 +#define V_DEAD_CYCLE0_PRE_REF_RMW(x) ((x) << S_DEAD_CYCLE0_PRE_REF_RMW) +#define F_DEAD_CYCLE0_PRE_REF_RMW V_DEAD_CYCLE0_PRE_REF_RMW(1U) + +#define S_DEAD_CYCLE1_PRE_REF_RMW 2 +#define V_DEAD_CYCLE1_PRE_REF_RMW(x) ((x) << S_DEAD_CYCLE1_PRE_REF_RMW) +#define F_DEAD_CYCLE1_PRE_REF_RMW V_DEAD_CYCLE1_PRE_REF_RMW(1U) + +#define S_DEAD_CYCLE0_POST_REF_RMW 1 +#define V_DEAD_CYCLE0_POST_REF_RMW(x) ((x) << S_DEAD_CYCLE0_POST_REF_RMW) +#define F_DEAD_CYCLE0_POST_REF_RMW V_DEAD_CYCLE0_POST_REF_RMW(1U) + +#define S_DEAD_CYCLE1_POST_REF_RMW 0 +#define V_DEAD_CYCLE1_POST_REF_RMW(x) ((x) << 
S_DEAD_CYCLE1_POST_REF_RMW) +#define F_DEAD_CYCLE1_POST_REF_RMW V_DEAD_CYCLE1_POST_REF_RMW(1U) + +#define A_EDC_H_DBG_CMD_QUEUE 0x50380 + +#define S_ECMDNRE 31 +#define V_ECMDNRE(x) ((x) << S_ECMDNRE) +#define F_ECMDNRE V_ECMDNRE(1U) + +#define S_ECMDNRB 30 +#define V_ECMDNRB(x) ((x) << S_ECMDNRB) +#define F_ECMDNRB V_ECMDNRB(1U) + +#define S_ECMDWR 29 +#define V_ECMDWR(x) ((x) << S_ECMDWR) +#define F_ECMDWR V_ECMDWR(1U) + +#define S_ECMDLEN 22 +#define M_ECMDLEN 0x7fU +#define V_ECMDLEN(x) ((x) << S_ECMDLEN) +#define G_ECMDLEN(x) (((x) >> S_ECMDLEN) & M_ECMDLEN) + +#define S_ECMDADDR 0 +#define M_ECMDADDR 0x3fffffU +#define V_ECMDADDR(x) ((x) << S_ECMDADDR) +#define G_ECMDADDR(x) (((x) >> S_ECMDADDR) & M_ECMDADDR) + +#define A_EDC_H_DBG_REFRESH 0x50384 + +#define S_REFDONE 12 +#define V_REFDONE(x) ((x) << S_REFDONE) +#define F_REFDONE V_REFDONE(1U) + +#define S_REFCNTEXPR 11 +#define V_REFCNTEXPR(x) ((x) << S_REFCNTEXPR) +#define F_REFCNTEXPR V_REFCNTEXPR(1U) + +#define S_REFPTR 8 +#define M_REFPTR 0x7U +#define V_REFPTR(x) ((x) << S_REFPTR) +#define G_REFPTR(x) (((x) >> S_REFPTR) & M_REFPTR) + +#define S_REFCNT 0 +#define M_REFCNT 0xffU +#define V_REFCNT(x) ((x) << S_REFCNT) +#define G_REFCNT(x) (((x) >> S_REFCNT) & M_REFCNT) + +/* registers for module EDC_T61 */ +#define EDC_T61_BASE_ADDR 0x50800 + +/* registers for module HMA_T6 */ +#define HMA_T6_BASE_ADDR 0x51000 + +#define S_TPH 12 +#define M_TPH 0x3U +#define V_TPH(x) ((x) << S_TPH) +#define G_TPH(x) (((x) >> S_TPH) & M_TPH) + +#define S_TPH_V 11 +#define V_TPH_V(x) ((x) << S_TPH_V) +#define F_TPH_V V_TPH_V(1U) + +#define S_DCA 0 +#define M_DCA 0x7ffU +#define V_DCA(x) ((x) << S_DCA) +#define G_DCA(x) (((x) >> S_DCA) & M_DCA) + +#define A_HMA_CFG 0x51020 + +#define S_OP_MODE 31 +#define V_OP_MODE(x) ((x) << S_OP_MODE) +#define F_OP_MODE V_OP_MODE(1U) + +#define A_HMA_TLB_ACCESS 0x51028 + +#define S_INV_ALL 29 +#define V_INV_ALL(x) ((x) << S_INV_ALL) +#define F_INV_ALL V_INV_ALL(1U) + +#define S_LOCK_ENTRY 
28 +#define V_LOCK_ENTRY(x) ((x) << S_LOCK_ENTRY) +#define F_LOCK_ENTRY V_LOCK_ENTRY(1U) + +#define S_E_SEL 0 +#define M_E_SEL 0x1fU +#define V_E_SEL(x) ((x) << S_E_SEL) +#define G_E_SEL(x) (((x) >> S_E_SEL) & M_E_SEL) + +#define A_HMA_TLB_BITS 0x5102c + +#define S_VA 12 +#define M_VA 0xfffffU +#define V_VA(x) ((x) << S_VA) +#define G_VA(x) (((x) >> S_VA) & M_VA) + +#define S_VALID_E 4 +#define V_VALID_E(x) ((x) << S_VALID_E) +#define F_VALID_E V_VALID_E(1U) + +#define S_LOCK_HMA 3 +#define V_LOCK_HMA(x) ((x) << S_LOCK_HMA) +#define F_LOCK_HMA V_LOCK_HMA(1U) + +#define S_T6_USED 2 +#define V_T6_USED(x) ((x) << S_T6_USED) +#define F_T6_USED V_T6_USED(1U) + +#define S_REGION 0 +#define M_REGION 0x3U +#define V_REGION(x) ((x) << S_REGION) +#define G_REGION(x) (((x) >> S_REGION) & M_REGION) + +#define A_HMA_TLB_DESC_0_H 0x51030 +#define A_HMA_TLB_DESC_0_L 0x51034 +#define A_HMA_TLB_DESC_1_H 0x51038 +#define A_HMA_TLB_DESC_1_L 0x5103c +#define A_HMA_TLB_DESC_2_H 0x51040 +#define A_HMA_TLB_DESC_2_L 0x51044 +#define A_HMA_TLB_DESC_3_H 0x51048 +#define A_HMA_TLB_DESC_3_L 0x5104c +#define A_HMA_TLB_DESC_4_H 0x51050 +#define A_HMA_TLB_DESC_4_L 0x51054 +#define A_HMA_TLB_DESC_5_H 0x51058 +#define A_HMA_TLB_DESC_5_L 0x5105c +#define A_HMA_TLB_DESC_6_H 0x51060 +#define A_HMA_TLB_DESC_6_L 0x51064 +#define A_HMA_TLB_DESC_7_H 0x51068 +#define A_HMA_TLB_DESC_7_L 0x5106c +#define A_HMA_REG0_MIN 0x51070 + +#define S_ADDR0_MIN 12 +#define M_ADDR0_MIN 0xfffffU +#define V_ADDR0_MIN(x) ((x) << S_ADDR0_MIN) +#define G_ADDR0_MIN(x) (((x) >> S_ADDR0_MIN) & M_ADDR0_MIN) + +#define A_HMA_REG0_MAX 0x51074 + +#define S_ADDR0_MAX 12 +#define M_ADDR0_MAX 0xfffffU +#define V_ADDR0_MAX(x) ((x) << S_ADDR0_MAX) +#define G_ADDR0_MAX(x) (((x) >> S_ADDR0_MAX) & M_ADDR0_MAX) + +#define A_HMA_REG0_MASK 0x51078 + +#define S_PAGE_SIZE0 12 +#define M_PAGE_SIZE0 0xfffffU +#define V_PAGE_SIZE0(x) ((x) << S_PAGE_SIZE0) +#define G_PAGE_SIZE0(x) (((x) >> S_PAGE_SIZE0) & M_PAGE_SIZE0) + +#define A_HMA_REG0_BASE 
0x5107c +#define A_HMA_REG1_MIN 0x51080 + +#define S_ADDR1_MIN 12 +#define M_ADDR1_MIN 0xfffffU +#define V_ADDR1_MIN(x) ((x) << S_ADDR1_MIN) +#define G_ADDR1_MIN(x) (((x) >> S_ADDR1_MIN) & M_ADDR1_MIN) + +#define A_HMA_REG1_MAX 0x51084 + +#define S_ADDR1_MAX 12 +#define M_ADDR1_MAX 0xfffffU +#define V_ADDR1_MAX(x) ((x) << S_ADDR1_MAX) +#define G_ADDR1_MAX(x) (((x) >> S_ADDR1_MAX) & M_ADDR1_MAX) + +#define A_HMA_REG1_MASK 0x51088 + +#define S_PAGE_SIZE1 12 +#define M_PAGE_SIZE1 0xfffffU +#define V_PAGE_SIZE1(x) ((x) << S_PAGE_SIZE1) +#define G_PAGE_SIZE1(x) (((x) >> S_PAGE_SIZE1) & M_PAGE_SIZE1) + +#define A_HMA_REG1_BASE 0x5108c +#define A_HMA_REG2_MIN 0x51090 + +#define S_ADDR2_MIN 12 +#define M_ADDR2_MIN 0xfffffU +#define V_ADDR2_MIN(x) ((x) << S_ADDR2_MIN) +#define G_ADDR2_MIN(x) (((x) >> S_ADDR2_MIN) & M_ADDR2_MIN) + +#define A_HMA_REG2_MAX 0x51094 + +#define S_ADDR2_MAX 12 +#define M_ADDR2_MAX 0xfffffU +#define V_ADDR2_MAX(x) ((x) << S_ADDR2_MAX) +#define G_ADDR2_MAX(x) (((x) >> S_ADDR2_MAX) & M_ADDR2_MAX) + +#define A_HMA_REG2_MASK 0x51098 + +#define S_PAGE_SIZE2 12 +#define M_PAGE_SIZE2 0xfffffU +#define V_PAGE_SIZE2(x) ((x) << S_PAGE_SIZE2) +#define G_PAGE_SIZE2(x) (((x) >> S_PAGE_SIZE2) & M_PAGE_SIZE2) + +#define A_HMA_REG2_BASE 0x5109c +#define A_HMA_REG3_MIN 0x510a0 + +#define S_ADDR3_MIN 12 +#define M_ADDR3_MIN 0xfffffU +#define V_ADDR3_MIN(x) ((x) << S_ADDR3_MIN) +#define G_ADDR3_MIN(x) (((x) >> S_ADDR3_MIN) & M_ADDR3_MIN) + +#define A_HMA_REG3_MAX 0x510a4 + +#define S_ADDR3_MAX 12 +#define M_ADDR3_MAX 0xfffffU +#define V_ADDR3_MAX(x) ((x) << S_ADDR3_MAX) +#define G_ADDR3_MAX(x) (((x) >> S_ADDR3_MAX) & M_ADDR3_MAX) + +#define A_HMA_REG3_MASK 0x510a8 + +#define S_PAGE_SIZE3 12 +#define M_PAGE_SIZE3 0xfffffU +#define V_PAGE_SIZE3(x) ((x) << S_PAGE_SIZE3) +#define G_PAGE_SIZE3(x) (((x) >> S_PAGE_SIZE3) & M_PAGE_SIZE3) + +#define A_HMA_REG3_BASE 0x510ac +#define A_HMA_SW_SYNC 0x510b0 + +#define S_ENTER_SYNC 31 +#define V_ENTER_SYNC(x) ((x) << S_ENTER_SYNC) 
+#define F_ENTER_SYNC V_ENTER_SYNC(1U) + +#define S_EXIT_SYNC 30 +#define V_EXIT_SYNC(x) ((x) << S_EXIT_SYNC) +#define F_EXIT_SYNC V_EXIT_SYNC(1U) + +#define S_IDTF_INT_ENABLE 5 +#define V_IDTF_INT_ENABLE(x) ((x) << S_IDTF_INT_ENABLE) +#define F_IDTF_INT_ENABLE V_IDTF_INT_ENABLE(1U) + +#define S_OTF_INT_ENABLE 4 +#define V_OTF_INT_ENABLE(x) ((x) << S_OTF_INT_ENABLE) +#define F_OTF_INT_ENABLE V_OTF_INT_ENABLE(1U) + +#define S_RTF_INT_ENABLE 3 +#define V_RTF_INT_ENABLE(x) ((x) << S_RTF_INT_ENABLE) +#define F_RTF_INT_ENABLE V_RTF_INT_ENABLE(1U) + +#define S_PCIEMST_INT_ENABLE 2 +#define V_PCIEMST_INT_ENABLE(x) ((x) << S_PCIEMST_INT_ENABLE) +#define F_PCIEMST_INT_ENABLE V_PCIEMST_INT_ENABLE(1U) + +#define S_MAMST_INT_ENABLE 1 +#define V_MAMST_INT_ENABLE(x) ((x) << S_MAMST_INT_ENABLE) +#define F_MAMST_INT_ENABLE V_MAMST_INT_ENABLE(1U) + +#define S_IDTF_INT_CAUSE 5 +#define V_IDTF_INT_CAUSE(x) ((x) << S_IDTF_INT_CAUSE) +#define F_IDTF_INT_CAUSE V_IDTF_INT_CAUSE(1U) + +#define S_OTF_INT_CAUSE 4 +#define V_OTF_INT_CAUSE(x) ((x) << S_OTF_INT_CAUSE) +#define F_OTF_INT_CAUSE V_OTF_INT_CAUSE(1U) + +#define S_RTF_INT_CAUSE 3 +#define V_RTF_INT_CAUSE(x) ((x) << S_RTF_INT_CAUSE) +#define F_RTF_INT_CAUSE V_RTF_INT_CAUSE(1U) + +#define S_PCIEMST_INT_CAUSE 2 +#define V_PCIEMST_INT_CAUSE(x) ((x) << S_PCIEMST_INT_CAUSE) +#define F_PCIEMST_INT_CAUSE V_PCIEMST_INT_CAUSE(1U) + +#define S_MAMST_INT_CAUSE 1 +#define V_MAMST_INT_CAUSE(x) ((x) << S_MAMST_INT_CAUSE) +#define F_MAMST_INT_CAUSE V_MAMST_INT_CAUSE(1U) + +#define A_HMA_MA_MST_ERR 0x5130c +#define A_HMA_RTF_ERR 0x51310 +#define A_HMA_OTF_ERR 0x51314 +#define A_HMA_IDTF_ERR 0x51318 +#define A_HMA_EXIT_TF 0x5131c + +#define S_RTF 30 +#define V_RTF(x) ((x) << S_RTF) +#define F_RTF V_RTF(1U) + +#define S_OTF 29 +#define V_OTF(x) ((x) << S_OTF) +#define F_OTF V_OTF(1U) + +#define S_IDTF 28 +#define V_IDTF(x) ((x) << S_IDTF) +#define F_IDTF V_IDTF(1U) + +#define A_HMA_LOCAL_DEBUG_CFG 0x51320 +#define A_HMA_LOCAL_DEBUG_RPT 0x51324 
+#define A_HMA_DEBUG_FSM_0 0xa000 + +#define S_EDC_FSM 18 +#define M_EDC_FSM 0x1fU +#define V_EDC_FSM(x) ((x) << S_EDC_FSM) +#define G_EDC_FSM(x) (((x) >> S_EDC_FSM) & M_EDC_FSM) + +#define S_RAS_FSM_SLV 15 +#define M_RAS_FSM_SLV 0x7U +#define V_RAS_FSM_SLV(x) ((x) << S_RAS_FSM_SLV) +#define G_RAS_FSM_SLV(x) (((x) >> S_RAS_FSM_SLV) & M_RAS_FSM_SLV) + +#define S_FC_FSM 10 +#define M_FC_FSM 0x1fU +#define V_FC_FSM(x) ((x) << S_FC_FSM) +#define G_FC_FSM(x) (((x) >> S_FC_FSM) & M_FC_FSM) + +#define S_COOKIE_ARB_FSM 8 +#define M_COOKIE_ARB_FSM 0x3U +#define V_COOKIE_ARB_FSM(x) ((x) << S_COOKIE_ARB_FSM) +#define G_COOKIE_ARB_FSM(x) (((x) >> S_COOKIE_ARB_FSM) & M_COOKIE_ARB_FSM) + +#define S_PCIE_CHUNK_FSM 6 +#define M_PCIE_CHUNK_FSM 0x3U +#define V_PCIE_CHUNK_FSM(x) ((x) << S_PCIE_CHUNK_FSM) +#define G_PCIE_CHUNK_FSM(x) (((x) >> S_PCIE_CHUNK_FSM) & M_PCIE_CHUNK_FSM) + +#define S_WTRANSFER_FSM 4 +#define M_WTRANSFER_FSM 0x3U +#define V_WTRANSFER_FSM(x) ((x) << S_WTRANSFER_FSM) +#define G_WTRANSFER_FSM(x) (((x) >> S_WTRANSFER_FSM) & M_WTRANSFER_FSM) + +#define S_WD_FSM 2 +#define M_WD_FSM 0x3U +#define V_WD_FSM(x) ((x) << S_WD_FSM) +#define G_WD_FSM(x) (((x) >> S_WD_FSM) & M_WD_FSM) + +#define S_RD_FSM 0 +#define M_RD_FSM 0x3U +#define V_RD_FSM(x) ((x) << S_RD_FSM) +#define G_RD_FSM(x) (((x) >> S_RD_FSM) & M_RD_FSM) + +#define A_HMA_DEBUG_FSM_1 0xa001 + +#define S_SYNC_FSM 11 +#define M_SYNC_FSM 0x3ffU +#define V_SYNC_FSM(x) ((x) << S_SYNC_FSM) +#define G_SYNC_FSM(x) (((x) >> S_SYNC_FSM) & M_SYNC_FSM) + +#define S_OCHK_FSM 9 +#define M_OCHK_FSM 0x3U +#define V_OCHK_FSM(x) ((x) << S_OCHK_FSM) +#define G_OCHK_FSM(x) (((x) >> S_OCHK_FSM) & M_OCHK_FSM) + +#define S_TLB_FSM 5 +#define M_TLB_FSM 0xfU +#define V_TLB_FSM(x) ((x) << S_TLB_FSM) +#define G_TLB_FSM(x) (((x) >> S_TLB_FSM) & M_TLB_FSM) + +#define S_PIO_FSM 0 +#define M_PIO_FSM 0x1fU +#define V_PIO_FSM(x) ((x) << S_PIO_FSM) +#define G_PIO_FSM(x) (((x) >> S_PIO_FSM) & M_PIO_FSM) + +#define A_HMA_DEBUG_PCIE_INTF 0xa002 + 
+#define S_T6_H_REQVLD 28 +#define V_T6_H_REQVLD(x) ((x) << S_T6_H_REQVLD) +#define F_T6_H_REQVLD V_T6_H_REQVLD(1U) + +#define S_H_REQFULL 27 +#define V_H_REQFULL(x) ((x) << S_H_REQFULL) +#define F_H_REQFULL V_H_REQFULL(1U) + +#define S_H_REQSOP 26 +#define V_H_REQSOP(x) ((x) << S_H_REQSOP) +#define F_H_REQSOP V_H_REQSOP(1U) + +#define S_H_REQEOP 25 +#define V_H_REQEOP(x) ((x) << S_H_REQEOP) +#define F_H_REQEOP V_H_REQEOP(1U) + +#define S_T6_H_RSPVLD 24 +#define V_T6_H_RSPVLD(x) ((x) << S_T6_H_RSPVLD) +#define F_T6_H_RSPVLD V_T6_H_RSPVLD(1U) + +#define S_H_RSPFULL 23 +#define V_H_RSPFULL(x) ((x) << S_H_RSPFULL) +#define F_H_RSPFULL V_H_RSPFULL(1U) + +#define S_H_RSPSOP 22 +#define V_H_RSPSOP(x) ((x) << S_H_RSPSOP) +#define F_H_RSPSOP V_H_RSPSOP(1U) + +#define S_H_RSPEOP 21 +#define V_H_RSPEOP(x) ((x) << S_H_RSPEOP) +#define F_H_RSPEOP V_H_RSPEOP(1U) + +#define S_H_RSPERR 20 +#define V_H_RSPERR(x) ((x) << S_H_RSPERR) +#define F_H_RSPERR V_H_RSPERR(1U) + +#define S_PCIE_CMD_AVAIL 19 +#define V_PCIE_CMD_AVAIL(x) ((x) << S_PCIE_CMD_AVAIL) +#define F_PCIE_CMD_AVAIL V_PCIE_CMD_AVAIL(1U) + +#define S_PCIE_CMD_RDY 18 +#define V_PCIE_CMD_RDY(x) ((x) << S_PCIE_CMD_RDY) +#define F_PCIE_CMD_RDY V_PCIE_CMD_RDY(1U) + +#define S_PCIE_WNR 17 +#define V_PCIE_WNR(x) ((x) << S_PCIE_WNR) +#define F_PCIE_WNR V_PCIE_WNR(1U) + +#define S_PCIE_LEN 9 +#define M_PCIE_LEN 0xffU +#define V_PCIE_LEN(x) ((x) << S_PCIE_LEN) +#define G_PCIE_LEN(x) (((x) >> S_PCIE_LEN) & M_PCIE_LEN) + +#define S_PCIE_TRWDAT_RDY 8 +#define V_PCIE_TRWDAT_RDY(x) ((x) << S_PCIE_TRWDAT_RDY) +#define F_PCIE_TRWDAT_RDY V_PCIE_TRWDAT_RDY(1U) + +#define S_PCIE_TRWDAT_AVAIL 7 +#define V_PCIE_TRWDAT_AVAIL(x) ((x) << S_PCIE_TRWDAT_AVAIL) +#define F_PCIE_TRWDAT_AVAIL V_PCIE_TRWDAT_AVAIL(1U) + +#define S_PCIE_TRWSOP 6 +#define V_PCIE_TRWSOP(x) ((x) << S_PCIE_TRWSOP) +#define F_PCIE_TRWSOP V_PCIE_TRWSOP(1U) + +#define S_PCIE_TRWEOP 5 +#define V_PCIE_TRWEOP(x) ((x) << S_PCIE_TRWEOP) +#define F_PCIE_TRWEOP V_PCIE_TRWEOP(1U) + 
+#define S_PCIE_TRRDAT_RDY 4 +#define V_PCIE_TRRDAT_RDY(x) ((x) << S_PCIE_TRRDAT_RDY) +#define F_PCIE_TRRDAT_RDY V_PCIE_TRRDAT_RDY(1U) + +#define S_PCIE_TRRDAT_AVAIL 3 +#define V_PCIE_TRRDAT_AVAIL(x) ((x) << S_PCIE_TRRDAT_AVAIL) +#define F_PCIE_TRRDAT_AVAIL V_PCIE_TRRDAT_AVAIL(1U) + +#define S_PCIE_TRRSOP 2 +#define V_PCIE_TRRSOP(x) ((x) << S_PCIE_TRRSOP) +#define F_PCIE_TRRSOP V_PCIE_TRRSOP(1U) + +#define S_PCIE_TRREOP 1 +#define V_PCIE_TRREOP(x) ((x) << S_PCIE_TRREOP) +#define F_PCIE_TRREOP V_PCIE_TRREOP(1U) + +#define S_PCIE_TRRERR 0 +#define V_PCIE_TRRERR(x) ((x) << S_PCIE_TRRERR) +#define F_PCIE_TRRERR V_PCIE_TRRERR(1U) + +#define A_HMA_DEBUG_PCIE_ADDR_INTERNAL_LO 0xa003 +#define A_HMA_DEBUG_PCIE_ADDR_INTERNAL_HI 0xa004 +#define A_HMA_DEBUG_PCIE_REQ_DATA_EXTERNAL 0xa005 + +#define S_REQDATA2 24 +#define M_REQDATA2 0xffU +#define V_REQDATA2(x) ((x) << S_REQDATA2) +#define G_REQDATA2(x) (((x) >> S_REQDATA2) & M_REQDATA2) + +#define S_REQDATA1 21 +#define M_REQDATA1 0x7U +#define V_REQDATA1(x) ((x) << S_REQDATA1) +#define G_REQDATA1(x) (((x) >> S_REQDATA1) & M_REQDATA1) + +#define S_REQDATA0 0 +#define M_REQDATA0 0x1fffffU +#define V_REQDATA0(x) ((x) << S_REQDATA0) +#define G_REQDATA0(x) (((x) >> S_REQDATA0) & M_REQDATA0) + +#define A_HMA_DEBUG_PCIE_RSP_DATA_EXTERNAL 0xa006 + +#define S_RSPDATA3 24 +#define M_RSPDATA3 0xffU +#define V_RSPDATA3(x) ((x) << S_RSPDATA3) +#define G_RSPDATA3(x) (((x) >> S_RSPDATA3) & M_RSPDATA3) + +#define S_RSPDATA2 16 +#define M_RSPDATA2 0xffU +#define V_RSPDATA2(x) ((x) << S_RSPDATA2) +#define G_RSPDATA2(x) (((x) >> S_RSPDATA2) & M_RSPDATA2) + +#define S_RSPDATA1 8 +#define M_RSPDATA1 0xffU +#define V_RSPDATA1(x) ((x) << S_RSPDATA1) +#define G_RSPDATA1(x) (((x) >> S_RSPDATA1) & M_RSPDATA1) + +#define S_RSPDATA0 0 +#define M_RSPDATA0 0xffU +#define V_RSPDATA0(x) ((x) << S_RSPDATA0) +#define G_RSPDATA0(x) (((x) >> S_RSPDATA0) & M_RSPDATA0) + +#define A_HMA_DEBUG_MA_SLV_CTL 0xa007 + +#define S_MA_CMD_AVAIL 19 +#define V_MA_CMD_AVAIL(x) 
((x) << S_MA_CMD_AVAIL) +#define F_MA_CMD_AVAIL V_MA_CMD_AVAIL(1U) + +#define S_MA_CLNT 15 +#define M_MA_CLNT 0xfU +#define V_MA_CLNT(x) ((x) << S_MA_CLNT) +#define G_MA_CLNT(x) (((x) >> S_MA_CLNT) & M_MA_CLNT) + +#define S_MA_WNR 14 +#define V_MA_WNR(x) ((x) << S_MA_WNR) +#define F_MA_WNR V_MA_WNR(1U) + +#define S_MA_LEN 6 +#define M_MA_LEN 0xffU +#define V_MA_LEN(x) ((x) << S_MA_LEN) +#define G_MA_LEN(x) (((x) >> S_MA_LEN) & M_MA_LEN) + +#define S_MA_MST_RD 5 +#define V_MA_MST_RD(x) ((x) << S_MA_MST_RD) +#define F_MA_MST_RD V_MA_MST_RD(1U) + +#define S_MA_MST_VLD 4 +#define V_MA_MST_VLD(x) ((x) << S_MA_MST_VLD) +#define F_MA_MST_VLD V_MA_MST_VLD(1U) + +#define S_MA_MST_ERR 3 +#define V_MA_MST_ERR(x) ((x) << S_MA_MST_ERR) +#define F_MA_MST_ERR V_MA_MST_ERR(1U) + +#define S_MAS_TLB_REQ 2 +#define V_MAS_TLB_REQ(x) ((x) << S_MAS_TLB_REQ) +#define F_MAS_TLB_REQ V_MAS_TLB_REQ(1U) + +#define S_MAS_TLB_ACK 1 +#define V_MAS_TLB_ACK(x) ((x) << S_MAS_TLB_ACK) +#define F_MAS_TLB_ACK V_MAS_TLB_ACK(1U) + +#define S_MAS_TLB_ERR 0 +#define V_MAS_TLB_ERR(x) ((x) << S_MAS_TLB_ERR) +#define F_MAS_TLB_ERR V_MAS_TLB_ERR(1U) + +#define A_HMA_DEBUG_MA_SLV_ADDR_INTERNAL 0xa008 +#define A_HMA_DEBUG_TLB_HIT_ENTRY 0xa009 +#define A_HMA_DEBUG_TLB_HIT_CNT 0xa00a +#define A_HMA_DEBUG_TLB_MISS_CNT 0xa00b +#define A_HMA_DEBUG_PAGE_TBL_LKP_CTL 0xa00c + +#define S_LKP_REQ_VLD 4 +#define V_LKP_REQ_VLD(x) ((x) << S_LKP_REQ_VLD) +#define F_LKP_REQ_VLD V_LKP_REQ_VLD(1U) + +#define S_LKP_DESC_SEL 1 +#define M_LKP_DESC_SEL 0x7U +#define V_LKP_DESC_SEL(x) ((x) << S_LKP_DESC_SEL) +#define G_LKP_DESC_SEL(x) (((x) >> S_LKP_DESC_SEL) & M_LKP_DESC_SEL) + +#define S_LKP_RSP_VLD 0 +#define V_LKP_RSP_VLD(x) ((x) << S_LKP_RSP_VLD) +#define F_LKP_RSP_VLD V_LKP_RSP_VLD(1U) + +#define A_HMA_DEBUG_PAGE_TBL_LKP_REQ_ADDR 0xa00d +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_0 0xa00e +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_1 0xa00f +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_2 0xa010 +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_3 0xa011 
+#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_4 0xa012 +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_5 0xa013 +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_6 0xa014 +#define A_HMA_DEBUG_PAGE_TBL_LKP_RSP_7 0xa015 +#define A_HMA_DEBUG_PHYS_DESC_INTERNAL_LO 0xa016 +#define A_HMA_DEBUG_PCIE_RD_REQ_CNT_LO 0xa017 +#define A_HMA_DEBUG_PCIE_RD_REQ_CNT_HI 0xa018 +#define A_HMA_DEBUG_PCIE_WR_REQ_CNT_LO 0xa019 +#define A_HMA_DEBUG_PCIE_WR_REQ_CNT_HI 0xa01a +#define A_HMA_DEBUG_PCIE_RD_DATA_CYC_CNT_LO 0xa01b +#define A_HMA_DEBUG_PCIE_RD_DATA_CYC_CNT_HI 0xa01c +#define A_HMA_DEBUG_PCIE_WR_DATA_CYC_CNT_LO 0xa01d +#define A_HMA_DEBUG_PCIE_WR_DATA_CYC_CNT_HI 0xa01e +#define A_HMA_DEBUG_PCIE_SOP_EOP_CNT 0xa01f + +#define S_WR_EOP_CNT 16 +#define M_WR_EOP_CNT 0xffU +#define V_WR_EOP_CNT(x) ((x) << S_WR_EOP_CNT) +#define G_WR_EOP_CNT(x) (((x) >> S_WR_EOP_CNT) & M_WR_EOP_CNT) + +#define S_RD_SOP_CNT 8 +#define M_RD_SOP_CNT 0xffU +#define V_RD_SOP_CNT(x) ((x) << S_RD_SOP_CNT) +#define G_RD_SOP_CNT(x) (((x) >> S_RD_SOP_CNT) & M_RD_SOP_CNT) + +#define S_RD_EOP_CNT 0 +#define M_RD_EOP_CNT 0xffU +#define V_RD_EOP_CNT(x) ((x) << S_RD_EOP_CNT) +#define G_RD_EOP_CNT(x) (((x) >> S_RD_EOP_CNT) & M_RD_EOP_CNT) diff --git a/sys/dev/cxgbe/common/t4_regs_values.h b/sys/dev/cxgbe/common/t4_regs_values.h index 40dabf1e9552..bb1f61f3b20f 100644 --- a/sys/dev/cxgbe/common/t4_regs_values.h +++ b/sys/dev/cxgbe/common/t4_regs_values.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011 Chelsio Communications, Inc. + * Copyright (c) 2011, 2016 Chelsio Communications, Inc. * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -82,6 +82,16 @@ #define X_INGPCIEBOUNDARY_2048B 6 #define X_INGPCIEBOUNDARY_4096B 7 +#define X_T6_INGPADBOUNDARY_SHIFT 3 +#define X_T6_INGPADBOUNDARY_8B 0 +#define X_T6_INGPADBOUNDARY_16B 1 +#define X_T6_INGPADBOUNDARY_32B 2 +#define X_T6_INGPADBOUNDARY_64B 3 +#define X_T6_INGPADBOUNDARY_128B 4 +#define X_T6_INGPADBOUNDARY_256B 5 +#define X_T6_INGPADBOUNDARY_512B 6 +#define X_T6_INGPADBOUNDARY_1024B 7 + #define X_INGPADBOUNDARY_SHIFT 5 #define X_INGPADBOUNDARY_32B 0 #define X_INGPADBOUNDARY_64B 1 @@ -102,6 +112,17 @@ #define X_EGRPCIEBOUNDARY_2048B 6 #define X_EGRPCIEBOUNDARY_4096B 7 +/* CONTROL2 register */ +#define X_INGPACKBOUNDARY_SHIFT 5 // *most* of the values ... +#define X_INGPACKBOUNDARY_16B 0 // Note weird value! +#define X_INGPACKBOUNDARY_64B 1 +#define X_INGPACKBOUNDARY_128B 2 +#define X_INGPACKBOUNDARY_256B 3 +#define X_INGPACKBOUNDARY_512B 4 +#define X_INGPACKBOUNDARY_1024B 5 +#define X_INGPACKBOUNDARY_2048B 6 +#define X_INGPACKBOUNDARY_4096B 7 + /* GTS register */ #define SGE_TIMERREGS 6 #define X_TIMERREG_COUNTER0 0 @@ -177,6 +198,52 @@ #define X_RSPD_TYPE_CPL 1 #define X_RSPD_TYPE_INTR 2 +/* + * Context field definitions. This is by no means a complete list of SGE + * Context fields. In the vast majority of cases the firmware initializes + * things the way they need to be set up. But in a few small cases, we need + * to compute new values and ship them off to the firmware to be applied to + * the SGE Conexts ... + */ + +/* + * Congestion Manager Definitions. 
+ */ +#define S_CONMCTXT_CNGTPMODE 19 +#define M_CONMCTXT_CNGTPMODE 0x3 +#define V_CONMCTXT_CNGTPMODE(x) ((x) << S_CONMCTXT_CNGTPMODE) +#define G_CONMCTXT_CNGTPMODE(x) \ + (((x) >> S_CONMCTXT_CNGTPMODE) & M_CONMCTXT_CNGTPMODE) +#define S_CONMCTXT_CNGCHMAP 0 +#define M_CONMCTXT_CNGCHMAP 0xffff +#define V_CONMCTXT_CNGCHMAP(x) ((x) << S_CONMCTXT_CNGCHMAP) +#define G_CONMCTXT_CNGCHMAP(x) \ + (((x) >> S_CONMCTXT_CNGCHMAP) & M_CONMCTXT_CNGCHMAP) + +#define X_CONMCTXT_CNGTPMODE_DISABLE 0 +#define X_CONMCTXT_CNGTPMODE_QUEUE 1 +#define X_CONMCTXT_CNGTPMODE_CHANNEL 2 +#define X_CONMCTXT_CNGTPMODE_BOTH 3 + +/* + * T5 and later support a new BAR2-based doorbell mechanism for Egress Queues. + * The User Doorbells are each 128 bytes in length with a Simple Doorbell at + * offsets 8x and a Write Combining single 64-byte Egress Queue Unit + * (X_IDXSIZE_UNIT) Gather Buffer interface at offset 64. For Ingress Queues, + * we have a Going To Sleep register at offsets 8x+4. + * + * As noted above, we have many instances of the Simple Doorbell and Going To + * Sleep registers at offsets 8x and 8x+4, respectively. We want to use a + * non-64-byte aligned offset for the Simple Doorbell in order to attempt to + * avoid buffering of the writes to the Simple Doorbell and we want to use a + * non-contiguous offset for the Going To Sleep writes in order to avoid + * possible combining between them. + */ +#define SGE_UDB_SIZE 128 +#define SGE_UDB_KDOORBELL 8 +#define SGE_UDB_GTS 20 +#define SGE_UDB_WCDOORBELL 64 + /* * CIM definitions. * ================ @@ -188,6 +255,7 @@ #define X_MBOWNER_NONE 0 #define X_MBOWNER_FW 1 #define X_MBOWNER_PL 2 +#define X_MBOWNER_FW_DEFERRED 3 /* * PCI-E definitions. @@ -208,6 +276,9 @@ * selects for a particular field being present. These fields, when present * in the Compressed Filter Tuple, have the following widths in bits. 
*/ +#define S_FT_FIRST S_FCOE +#define S_FT_LAST S_FRAGMENTATION + #define W_FT_FCOE 1 #define W_FT_PORT 3 #define W_FT_VNIC_ID 17 diff --git a/sys/dev/cxgbe/common/t4_tcb.h b/sys/dev/cxgbe/common/t4_tcb.h index 774b058826de..a09b13b50bb8 100644 --- a/sys/dev/cxgbe/common/t4_tcb.h +++ b/sys/dev/cxgbe/common/t4_tcb.h @@ -1,5 +1,5 @@ /*- - * Copyright (c) 2011 Chelsio Communications, Inc. + * Copyright (c) 2011, 2016 Chelsio Communications, Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -332,12 +332,19 @@ #define M_TCB_PDU_HDR_LEN 0xffULL #define V_TCB_PDU_HDR_LEN(x) ((x) << S_TCB_PDU_HDR_LEN) -/* 1023:1001 */ +/* 1019:1001 */ #define W_TCB_AUX1_SLUSH1 31 #define S_TCB_AUX1_SLUSH1 9 -#define M_TCB_AUX1_SLUSH1 0x7fffffULL +#define M_TCB_AUX1_SLUSH1 0x7ffffULL #define V_TCB_AUX1_SLUSH1(x) ((x) << S_TCB_AUX1_SLUSH1) +/* 1023:1020 */ +#define W_TCB_ULP_EXT 31 +#define S_TCP_ULP_EXT 28 +#define M_TCB_ULP_EXT 0xfULL +#define V_TCB_ULP_EXT(x) ((x) << S_TCP_ULP_EXT) + + /* 840:832 */ #define W_TCB_IRS_ULP 26 #define S_TCB_IRS_ULP 0 diff --git a/sys/dev/cxgbe/tom/t4_connect.c b/sys/dev/cxgbe/tom/t4_connect.c index d6855e223b8d..c386a83fea09 100644 --- a/sys/dev/cxgbe/tom/t4_connect.c +++ b/sys/dev/cxgbe/tom/t4_connect.c @@ -144,16 +144,6 @@ done: return (0); } -static inline int -act_open_has_tid(unsigned int status) -{ - - return (status != CPL_ERR_TCAM_FULL && - status != CPL_ERR_TCAM_PARITY && - status != CPL_ERR_CONN_EXIST && - status != CPL_ERR_ARP_MISS); -} - /* * Convert an ACT_OPEN_RPL status to an errno. 
*/ @@ -257,7 +247,7 @@ calc_opt2a(struct socket *so, struct toepcb *toep) opt2 |= F_RX_COALESCE_VALID; else { opt2 |= F_T5_OPT_2_VALID; - opt2 |= F_CONG_CNTRL_VALID; /* OPT_2_ISS really, for T5 */ + opt2 |= F_T5_ISS; } if (sc->tt.rx_coalesce) opt2 |= V_RX_COALESCE(M_RX_COALESCE); -- cgit v1.2.3 From 9fb02a70d0da161f7c3d8f98578e8fa19c4a09d0 Mon Sep 17 00:00:00 2001 From: Bryan Drewery Date: Fri, 19 Feb 2016 00:41:24 +0000 Subject: Avoid reading .depend.* in simple cases where not needed. This will speed up some tree-walks with FAST_DEPEND which otherwise would include length(SRCS) .depend files. This also uses a trick suggested by sjg@ to still read them in when specifying _V_READ_DEPEND=1 in the env/make args. Sponsored by: EMC / Isilon Storage Division --- share/mk/bsd.dep.mk | 12 +++++++++++- sys/conf/kern.post.mk | 11 ++++++++++- 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/share/mk/bsd.dep.mk b/share/mk/bsd.dep.mk index 99fc97257748..6f27d7e117e6 100644 --- a/share/mk/bsd.dep.mk +++ b/share/mk/bsd.dep.mk @@ -81,6 +81,16 @@ tags: ${SRCS} .endif .endif +# Skip reading .depend when not needed to speed up tree-walks +# and simple lookups. 
+.if !empty(.MAKEFLAGS:M-V${_V_READ_DEPEND}) || make(obj) || make(clean*) || \ + make(install*) +_SKIP_READ_DEPEND= 1 +.if ${MK_DIRDEPS_BUILD} == "no" +.MAKE.DEPENDFILE= /dev/null +.endif +.endif + .if defined(SRCS) CLEANFILES?= @@ -181,7 +191,7 @@ DEPENDSRCS= ${SRCS:M*.[cSC]} ${SRCS:M*.cxx} ${SRCS:M*.cpp} ${SRCS:M*.cc} DEPENDOBJS+= ${DEPENDSRCS:R:S,$,.o,} .endif DEPENDFILES_OBJS= ${DEPENDOBJS:O:u:${DEPEND_FILTER}:C/^/${DEPENDFILE}./} -.if ${.MAKEFLAGS:M-V} == "" +.if !defined(_SKIP_READ_DEPEND) .for __depend_obj in ${DEPENDFILES_OBJS} .sinclude "${__depend_obj}" .endfor diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk index 52cd186a13f0..8983467b4962 100644 --- a/sys/conf/kern.post.mk +++ b/sys/conf/kern.post.mk @@ -206,6 +206,15 @@ CFILES_OFED=${CFILES:M*/ofed/*} # We have "special" -I include paths for MLX5. CFILES_MLX5=${CFILES:M*/dev/mlx5/*} +# Skip reading .depend when not needed to speed up tree-walks +# and simple lookups. +.if !empty(.MAKEFLAGS:M-V${_V_READ_DEPEND}) || make(obj) || make(clean*) || \ + make(install*) || make(kernel-obj) || make(kernel-clean*) || \ + make(kernel-install*) +_SKIP_READ_DEPEND= 1 +.MAKE.DEPENDFILE= /dev/null +.endif + kernel-depend: .depend # The argument list can be very long, so use make -V and xargs to # pass it to mkdep. @@ -222,7 +231,7 @@ DEPEND_CFLAGS+= -MT${.TARGET} CFLAGS+= ${DEPEND_CFLAGS} DEPENDOBJS+= ${SYSTEM_OBJS} genassym.o DEPENDFILES_OBJS= ${DEPENDOBJS:O:u:C/^/.depend./} -.if ${.MAKEFLAGS:M-V} == "" +.if !defined(_SKIP_READ_DEPEND) .for __depend_obj in ${DEPENDFILES_OBJS} .sinclude "${__depend_obj}" .endfor -- cgit v1.2.3 From af96ced5c540ce4b3f1a2f304a90e8b36a4aa429 Mon Sep 17 00:00:00 2001 From: Bryan Drewery Date: Fri, 19 Feb 2016 00:41:38 +0000 Subject: DIRDEPS_BUILD: Add dependencies somehow missed in r295681. 
Sponsored by: EMC / Isilon Storage Division --- usr.bin/clang/llvm-ar/Makefile.depend | 66 +++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) diff --git a/usr.bin/clang/llvm-ar/Makefile.depend b/usr.bin/clang/llvm-ar/Makefile.depend index 3e7f3f5e7165..e8ffd7966e6b 100644 --- a/usr.bin/clang/llvm-ar/Makefile.depend +++ b/usr.bin/clang/llvm-ar/Makefile.depend @@ -2,10 +2,76 @@ # Autogenerated - do NOT edit! DIRDEPS = \ + gnu/lib/csu \ + gnu/lib/libgcc \ include \ include/xlocale \ + lib/${CSU_DIR} \ + lib/clang/libllvmaarch64asmparser \ + lib/clang/libllvmaarch64codegen \ + lib/clang/libllvmaarch64desc \ + lib/clang/libllvmaarch64disassembler \ + lib/clang/libllvmaarch64info \ + lib/clang/libllvmaarch64instprinter \ + lib/clang/libllvmaarch64utils \ + lib/clang/libllvmanalysis \ + lib/clang/libllvmarmasmparser \ + lib/clang/libllvmarmcodegen \ + lib/clang/libllvmarmdesc \ + lib/clang/libllvmarmdisassembler \ + lib/clang/libllvmarminfo \ + lib/clang/libllvmarminstprinter \ + lib/clang/libllvmasmprinter \ + lib/clang/libllvmbitreader \ + lib/clang/libllvmcodegen \ + lib/clang/libllvmcore \ + lib/clang/libllvminstcombine \ + lib/clang/libllvminstrumentation \ + lib/clang/libllvmipa \ + lib/clang/libllvmlibdriver \ + lib/clang/libllvmmc \ + lib/clang/libllvmmcdisassembler \ + lib/clang/libllvmmcparser \ + lib/clang/libllvmmipsasmparser \ + lib/clang/libllvmmipscodegen \ + lib/clang/libllvmmipsdesc \ + lib/clang/libllvmmipsdisassembler \ + lib/clang/libllvmmipsinfo \ + lib/clang/libllvmmipsinstprinter \ + lib/clang/libllvmobject \ + lib/clang/libllvmoption \ + lib/clang/libllvmpowerpcasmparser \ + lib/clang/libllvmpowerpccodegen \ + lib/clang/libllvmpowerpcdesc \ + lib/clang/libllvmpowerpcdisassembler \ + lib/clang/libllvmpowerpcinfo \ + lib/clang/libllvmpowerpcinstprinter \ + lib/clang/libllvmprofiledata \ + lib/clang/libllvmscalaropts \ + lib/clang/libllvmselectiondag \ + lib/clang/libllvmsparcasmparser \ + lib/clang/libllvmsparccodegen \ + 
lib/clang/libllvmsparcdesc \ + lib/clang/libllvmsparcdisassembler \ + lib/clang/libllvmsparcinfo \ + lib/clang/libllvmsparcinstprinter \ + lib/clang/libllvmsupport \ + lib/clang/libllvmtarget \ + lib/clang/libllvmtransformutils \ + lib/clang/libllvmx86asmparser \ + lib/clang/libllvmx86codegen \ + lib/clang/libllvmx86desc \ + lib/clang/libllvmx86disassembler \ + lib/clang/libllvmx86info \ + lib/clang/libllvmx86instprinter \ + lib/clang/libllvmx86utils \ + lib/libc \ lib/libc++ \ + lib/libcompiler_rt \ + lib/libthr \ + lib/libz \ lib/msun \ + lib/ncurses/ncursesw \ .include -- cgit v1.2.3 From f84e07cb58811ed640777296c70debe1ae467ba4 Mon Sep 17 00:00:00 2001 From: Bryan Drewery Date: Fri, 19 Feb 2016 00:41:41 +0000 Subject: FAST_DEPEND: Apply conditional -MF from r291945 to kernel as well. Sponsored by: EMC / Isilon Storage Division --- sys/conf/kern.post.mk | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/sys/conf/kern.post.mk b/sys/conf/kern.post.mk index 8983467b4962..01c1eb807220 100644 --- a/sys/conf/kern.post.mk +++ b/sys/conf/kern.post.mk @@ -228,7 +228,14 @@ DEPENDFILES= .depend DEPENDFILES+= .depend.* DEPEND_CFLAGS+= -MD -MP -MF.depend.${.TARGET} DEPEND_CFLAGS+= -MT${.TARGET} +.if defined(.PARSEDIR) +# Only add in DEPEND_CFLAGS for CFLAGS on files we expect from DEPENDOBJS +# as those are the only ones we will include. +DEPEND_CFLAGS_CONDITION= !empty(DEPENDOBJS:M${.TARGET}) +CFLAGS+= ${${DEPEND_CFLAGS_CONDITION}:?${DEPEND_CFLAGS}:} +.else CFLAGS+= ${DEPEND_CFLAGS} +.endif DEPENDOBJS+= ${SYSTEM_OBJS} genassym.o DEPENDFILES_OBJS= ${DEPENDOBJS:O:u:C/^/.depend./} .if !defined(_SKIP_READ_DEPEND) .for __depend_obj in ${DEPENDFILES_OBJS} .sinclude "${__depend_obj}" .endfor -- cgit v1.2.3 From 5c74f47c9685a478789964a19609b4c83064ecdb Mon Sep 17 00:00:00 2001 From: Maxim Sobolev Date: Fri, 19 Feb 2016 01:00:48 +0000 Subject: Clear up confusion as to who the original historical authors of code and manual page were. For whatever reason it listed myself as a primary author, which is just not true.
Also, majority of the manpage is copied verbatim from the geom_uzip(4), contributed by ceri, with only minor adjustments from loos, so put ceri back into the copyright section where he belongs and reflect that in the AUTHORS section. For what it's worth, I think this one should be deleted and LZMA support just folded back into geom_uzip(4) / mkuzip(4) where it really belongs. MFC after: 1 month --- share/man/man4/geom_uncompress.4 | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/share/man/man4/geom_uncompress.4 b/share/man/man4/geom_uncompress.4 index 9acaea4e90b9..2bcf0f519e26 100644 --- a/share/man/man4/geom_uncompress.4 +++ b/share/man/man4/geom_uncompress.4 @@ -1,3 +1,4 @@ +.\" Copyright (c) 2006, Ceri Davies .\" Copyright (c) 2014, Luiz Otavio O Souza .\" All rights reserved. .\" @@ -100,8 +101,18 @@ Consumers: The .Nm driver was written by -.An Maxim Sobolev Aq Mt sobomax@FreeBSD.org -and -.An Aleksandr Rybalko Aq Mt ray@FreeBSD.org . +.An Max Khon Aq Mt fjoe@FreeBSD.org +as +.Xr geom_uzip 4 . +.An Aleksandr Rybalko Aq Mt ray@FreeBSD.org +copied it over as +.Nm +and added LZMA functionality . This manual page was written by -.An Luiz Otavio O Souza Aq Mt loos@FreeBSD.org . +.An Ceri Davies Aq Mt ceri@FreeBSD.org +for the +.Xr geom_uzip 8 , +and modified by +.An Luiz Otavio O Souza Aq Mt loos@FreeBSD.org +to match +.Nm . -- cgit v1.2.3 From b2db5624529a6d1418d9fc37865af3cad627bd59 Mon Sep 17 00:00:00 2001 From: Maxim Sobolev Date: Fri, 19 Feb 2016 01:06:45 +0000 Subject: Fix section number of .Xr geom_uzip in r295782. MFC after: 1 month (together with r295782) --- share/man/man4/geom_uncompress.4 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/share/man/man4/geom_uncompress.4 b/share/man/man4/geom_uncompress.4 index 2bcf0f519e26..af12d2705781 100644 --- a/share/man/man4/geom_uncompress.4 +++ b/share/man/man4/geom_uncompress.4 @@ -111,7 +111,7 @@ and added LZMA functionality . 
This manual page was written by .An Ceri Davies Aq Mt ceri@FreeBSD.org for the -.Xr geom_uzip 8 , +.Xr geom_uzip 4 , and modified by .An Luiz Otavio O Souza Aq Mt loos@FreeBSD.org to match -- cgit v1.2.3 From dd51b8ceaa320d40eb87559670310e7cadac25ba Mon Sep 17 00:00:00 2001 From: Jason Helfman Date: Fri, 19 Feb 2016 01:08:03 +0000 Subject: - language tightening - cleanup SEE ALSO section (thanks brueffer@) Approved by: wblock (mentor) Differential Revision: https://reviews.freebsd.org/D5335 --- share/man/man5/mailer.conf.5 | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) diff --git a/share/man/man5/mailer.conf.5 b/share/man/man5/mailer.conf.5 index 7bb57fc7d993..476bb77d759c 100644 --- a/share/man/man5/mailer.conf.5 +++ b/share/man/man5/mailer.conf.5 @@ -101,9 +101,10 @@ mailq /usr/libexec/sendmail/sendmail newaliases /usr/libexec/sendmail/sendmail .Ed .Pp -This example shows how to invoke a sendmail-workalike like +Using .Nm Postfix -in place of +(from ports) +to replace .Xr sendmail 8 : .Bd -literal -offset indent # Emulate sendmail using postfix @@ -113,12 +114,10 @@ mailq /usr/local/sbin/sendmail newaliases /usr/local/sbin/sendmail .Ed .Pp -This example shows -how to invoke -a sendmail-workalike with +Using .Nm Exim (from ports) -in place of +to replace .Xr sendmail 8 : .Bd -literal -offset indent # Emulate sendmail using exim @@ -129,11 +128,11 @@ newaliases /usr/bin/true rmail /usr/local/sbin/exim -i -oee .Ed .Pp -This example shows the use of the +Using .Nm mini_sendmail -package from ports in place of -.Xr sendmail 8 . -Note the use of additional arguments. 
+(from ports) +to replace +.Xr sendmail 8 : .Bd -literal -offset indent # Send outgoing mail to a smart relay using mini_sendmail sendmail /usr/local/bin/mini_sendmail -srelayhost @@ -153,14 +152,16 @@ newaliases /usr/libexec/dma rmail /usr/libexec/dma .Ed .Sh SEE ALSO -.Xr dma 8 , .Xr mail 1 , .Xr mailq 1 , .Xr newaliases 1 , +.Xr dma 8 , .Xr mailwrapper 8 , .Xr sendmail 8 .Pp .Xr postfix 1 Pq Pa ports/mail/postfix , +.Xr dma 8 Pq Pa ports/mail/dma , +.Xr exim 8 Pq Pa ports/mail/exim , .Xr mini_sendmail 8 Pq Pa ports/mail/mini_sendmail .Sh HISTORY .Nm -- cgit v1.2.3 From fe169828c3d6b0dfabd753da476928fe23ab54a5 Mon Sep 17 00:00:00 2001 From: Mark Johnston Date: Fri, 19 Feb 2016 01:35:01 +0000 Subject: Return an error if both EV_ENABLE and EV_DISABLE are specified for a kevent. Currently, this combination results in EV_DISABLE being ignored. Reviewed by: kib Sponsored by: EMC / Isilon Storage Division Differential Revision: https://reviews.freebsd.org/D5307 --- sys/kern/kern_event.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c index 805b6b541612..f96b6d75e79b 100644 --- a/sys/kern/kern_event.c +++ b/sys/kern/kern_event.c @@ -1116,6 +1116,9 @@ kqueue_register(struct kqueue *kq, struct kevent *kev, struct thread *td, int wa int error, filt, event; int haskqglobal, filedesc_unlock; + if ((kev->flags & (EV_ENABLE | EV_DISABLE)) == (EV_ENABLE | EV_DISABLE)) + return (EINVAL); + fp = NULL; kn = NULL; error = 0; -- cgit v1.2.3 From 88c2beac9cedc0bd3ef6696833ea4d8d6f59faa1 Mon Sep 17 00:00:00 2001 From: Mark Johnston Date: Fri, 19 Feb 2016 01:49:33 +0000 Subject: Ensure that we test the event condition when a disabled kevent is enabled. r274560 modified kqueue_register() to only test the event condition if the corresponding knote is not disabled. 
However, this check takes place before the EV_ENABLE flag is used to clear the KN_DISABLED flag on the knote, so enabling a previously-disabled kevent would not result in a notification for a triggered event. This change fixes the problem by testing for EV_ENABLED before possibly checking the event condition. This change also updates a kqueue regression test to exercise this case. PR: 206368 Reviewed by: kib Sponsored by: EMC / Isilon Storage Division Differential Revision: https://reviews.freebsd.org/D5307 --- sys/kern/kern_event.c | 19 ++++++++----------- tests/sys/kqueue/read.c | 14 ++++++++------ 2 files changed, 16 insertions(+), 17 deletions(-) diff --git a/sys/kern/kern_event.c b/sys/kern/kern_event.c index f96b6d75e79b..0b6f7daeb513 100644 --- a/sys/kern/kern_event.c +++ b/sys/kern/kern_event.c @@ -1323,27 +1323,24 @@ findkn: * kn_knlist. */ done_ev_add: - if ((kev->flags & EV_DISABLE) && - ((kn->kn_status & KN_DISABLED) == 0)) { + if ((kev->flags & EV_ENABLE) != 0) + kn->kn_status &= ~KN_DISABLED; + else if ((kev->flags & EV_DISABLE) != 0) kn->kn_status |= KN_DISABLED; - } if ((kn->kn_status & KN_DISABLED) == 0) event = kn->kn_fop->f_event(kn, 0); else event = 0; + KQ_LOCK(kq); if (event) - KNOTE_ACTIVATE(kn, 1); + kn->kn_status |= KN_ACTIVE; + if ((kn->kn_status & (KN_ACTIVE | KN_DISABLED | KN_QUEUED)) == + KN_ACTIVE) + knote_enqueue(kn); kn->kn_status &= ~(KN_INFLUX | KN_SCAN); KN_LIST_UNLOCK(kn); - - if ((kev->flags & EV_ENABLE) && (kn->kn_status & KN_DISABLED)) { - kn->kn_status &= ~KN_DISABLED; - if ((kn->kn_status & KN_ACTIVE) && - ((kn->kn_status & KN_QUEUED) == 0)) - knote_enqueue(kn); - } KQ_UNLOCK_FLUX(kq); done: diff --git a/tests/sys/kqueue/read.c b/tests/sys/kqueue/read.c index cc6542719fba..28371444383f 100644 --- a/tests/sys/kqueue/read.c +++ b/tests/sys/kqueue/read.c @@ -124,15 +124,17 @@ test_kevent_socket_disable_and_enable(void) test_begin(test_id); - /* Add an event, then disable it. 
*/ - EV_SET(&kev, sockfd[0], EVFILT_READ, EV_ADD, 0, 0, &sockfd[0]); - if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) - err(1, "%s", test_id); - EV_SET(&kev, sockfd[0], EVFILT_READ, EV_DISABLE, 0, 0, &sockfd[0]); + /* + * Write to the socket before adding the event. This way we can verify that + * enabling a triggered kevent causes the event to be returned immediately. + */ + kevent_socket_fill(); + + /* Add a disabled event. */ + EV_SET(&kev, sockfd[0], EVFILT_READ, EV_ADD | EV_DISABLE, 0, 0, &sockfd[0]); if (kevent(kqfd, &kev, 1, NULL, 0, NULL) < 0) err(1, "%s", test_id); - kevent_socket_fill(); test_no_kevents(); /* Re-enable the knote, then see if an event is generated */ -- cgit v1.2.3 From 29899c0add37cfb9149f97d2611ab17a0271299a Mon Sep 17 00:00:00 2001 From: Kevin Lo Date: Fri, 19 Feb 2016 01:56:59 +0000 Subject: Add missing parentheses. Found by: PVS-Studio --- sys/dev/ixl/if_ixl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index d5ab42fb6025..c3c69d647259 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -6311,7 +6311,7 @@ ixl_vf_config_promisc_msg(struct ixl_pf *pf, struct ixl_vf *vf, return; } - if (!vf->vf_flags & VF_FLAG_PROMISC_CAP) { + if (!(vf->vf_flags & VF_FLAG_PROMISC_CAP)) { i40e_send_vf_nack(pf, vf, I40E_VIRTCHNL_OP_CONFIG_PROMISCUOUS_MODE, I40E_ERR_PARAM); return; -- cgit v1.2.3 From 277db305768ad6765ed735ced43360000c282ed6 Mon Sep 17 00:00:00 2001 From: Kevin Lo Date: Fri, 19 Feb 2016 01:57:51 +0000 Subject: Remove bogus cast. 
--- sys/dev/iwn/if_iwn.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/iwn/if_iwn.c b/sys/dev/iwn/if_iwn.c index c361fe36aeb7..28b7b7f3f88a 100644 --- a/sys/dev/iwn/if_iwn.c +++ b/sys/dev/iwn/if_iwn.c @@ -417,7 +417,7 @@ iwn_is_3stream_device(struct iwn_softc *sc) static int iwn_attach(device_t dev) { - struct iwn_softc *sc = (struct iwn_softc *)device_get_softc(dev); + struct iwn_softc *sc = device_get_softc(dev); struct ieee80211com *ic; int i, error, rid; -- cgit v1.2.3 From 43cd61606b6bfae52bb09856277751103bfa28fd Mon Sep 17 00:00:00 2001 From: Justin Hibbits Date: Fri, 19 Feb 2016 03:37:56 +0000 Subject: Replace several bus_alloc_resource() calls using default arguments with bus_alloc_resource_any() Since these calls only use default arguments, bus_alloc_resource_any() is the right call. Differential Revision: https://reviews.freebsd.org/D5306 --- sys/dev/arcmsr/arcmsr.c | 2 +- sys/dev/cy/cy_isa.c | 10 +++++----- sys/dev/cy/cy_pci.c | 10 +++++----- sys/dev/ed/if_ed_pccard.c | 4 ++-- sys/dev/fb/s3_pci.c | 8 ++++---- sys/dev/fdc/fdc_pccard.c | 3 +-- sys/dev/hpt27xx/hpt27xx_osm_bsd.c | 4 ++-- sys/dev/hptiop/hptiop.c | 4 ++-- sys/dev/hptmv/entry.c | 2 +- sys/dev/hptnr/hptnr_osm_bsd.c | 4 ++-- sys/dev/hptrr/hptrr_osm_bsd.c | 4 ++-- sys/dev/isci/isci.c | 4 ++-- sys/dev/ixgb/if_ixgb.c | 10 +++++----- sys/dev/lmc/if_lmc.c | 8 ++++---- sys/dev/mrsas/mrsas.c | 4 ++-- sys/dev/mxge/if_mxge.c | 8 ++++---- sys/dev/nvme/nvme_ctrlr.c | 8 ++++---- sys/dev/quicc/quicc_core.c | 12 ++++++------ sys/dev/sound/pci/envy24.c | 20 ++++++++++---------- sys/dev/sound/pci/envy24ht.c | 12 ++++++------ sys/dev/sound/pci/hdspe.c | 8 ++++---- sys/dev/sound/pci/vibes.c | 4 ++-- sys/dev/twa/tw_osl_freebsd.c | 8 ++++---- sys/dev/tws/tws.c | 4 ++-- sys/isa/isa_common.c | 8 ++++---- sys/isa/vga_isa.c | 8 ++++---- sys/mips/sibyte/ata_zbbus.c | 2 +- 27 files changed, 91 insertions(+), 92 deletions(-) diff --git a/sys/dev/arcmsr/arcmsr.c b/sys/dev/arcmsr/arcmsr.c index 
f1eb7f3d039e..636c2a8e10fe 100644 --- a/sys/dev/arcmsr/arcmsr.c +++ b/sys/dev/arcmsr/arcmsr.c @@ -4323,7 +4323,7 @@ static int arcmsr_attach(device_t dev) } /* After setting up the adapter, map our interrupt */ rid = 0; - irqres = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0ul, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE); + irqres = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE); if(irqres == NULL || #if __FreeBSD_version >= 700025 bus_setup_intr(dev, irqres, INTR_TYPE_CAM|INTR_ENTROPY|INTR_MPSAFE, NULL, arcmsr_intr_handler, acb, &acb->ih)) { diff --git a/sys/dev/cy/cy_isa.c b/sys/dev/cy/cy_isa.c index 390ccd22dd20..448cc033dcb0 100644 --- a/sys/dev/cy/cy_isa.c +++ b/sys/dev/cy/cy_isa.c @@ -80,8 +80,8 @@ cy_isa_probe(device_t dev) return (ENXIO); mem_rid = 0; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "ioport resource allocation failed\n"); return (ENXIO); @@ -112,8 +112,8 @@ cy_isa_attach(device_t dev) mem_res = NULL; mem_rid = 0; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "memory resource allocation failed\n"); goto fail; @@ -127,7 +127,7 @@ cy_isa_attach(device_t dev) } irq_rid = 0; - irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul, 0ul, + irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq_res == NULL) { device_printf(dev, "interrupt resource allocation failed\n"); diff --git a/sys/dev/cy/cy_pci.c b/sys/dev/cy/cy_pci.c index 3cc3a3bd5df8..d480fd241e6c 100644 --- a/sys/dev/cy/cy_pci.c +++ b/sys/dev/cy/cy_pci.c @@ -114,8 +114,8 @@ cy_pci_attach(dev) mem_res = NULL; ioport_rid = CY_PCI_BASE_ADDR1; - ioport_res = bus_alloc_resource(dev, 
SYS_RES_IOPORT, &ioport_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + ioport_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &ioport_rid, + RF_ACTIVE); if (ioport_res == NULL) { device_printf(dev, "ioport resource allocation failed\n"); goto fail; @@ -123,8 +123,8 @@ cy_pci_attach(dev) ioport = rman_get_start(ioport_res); mem_rid = CY_PCI_BASE_ADDR2; - mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &mem_rid, - 0ul, ~0ul, 0ul, RF_ACTIVE); + mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &mem_rid, + RF_ACTIVE); if (mem_res == NULL) { device_printf(dev, "memory resource allocation failed\n"); goto fail; @@ -138,7 +138,7 @@ cy_pci_attach(dev) irq_rid = 0; - irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &irq_rid, 0ul, ~0ul, 0ul, + irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &irq_rid, RF_SHAREABLE | RF_ACTIVE); if (irq_res == NULL) { device_printf(dev, "interrupt resource allocation failed\n"); diff --git a/sys/dev/ed/if_ed_pccard.c b/sys/dev/ed/if_ed_pccard.c index 07cf892dc431..571b8679dd58 100644 --- a/sys/dev/ed/if_ed_pccard.c +++ b/sys/dev/ed/if_ed_pccard.c @@ -509,8 +509,8 @@ ed_pccard_attach(device_t dev) } if (rman_get_size(sc->port_res) == ED_NOVELL_IO_PORTS / 2) { port_rid++; - sc->port_res2 = bus_alloc_resource(dev, SYS_RES_IOPORT, - &port_rid, 0ul, ~0ul, 1, RF_ACTIVE); + sc->port_res2 = bus_alloc_resource_any(dev, SYS_RES_IOPORT, + &port_rid, RF_ACTIVE); if (sc->port_res2 == NULL || rman_get_size(sc->port_res2) != ED_NOVELL_IO_PORTS / 2) { error = ENXIO; diff --git a/sys/dev/fb/s3_pci.c b/sys/dev/fb/s3_pci.c index c1432e2f005d..4a438fa690fe 100644 --- a/sys/dev/fb/s3_pci.c +++ b/sys/dev/fb/s3_pci.c @@ -478,8 +478,8 @@ s3pci_attach(device_t dev) /* Allocate resources */ rid = 0; - if (!(sc->port_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0ul, ~0ul, 0, RF_ACTIVE | RF_SHAREABLE))) { + if (!(sc->port_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE))) { printf("%s: port resource allocation failed!\n", 
__func__); goto error; } @@ -487,8 +487,8 @@ s3pci_attach(device_t dev) sc->sh = rman_get_bushandle(sc->port_res); rid = 1; - if (!(sc->enh_res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0ul, ~0ul, 0, RF_ACTIVE | RF_SHAREABLE))) { + if (!(sc->enh_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE))) { printf("%s: enhanced port resource allocation failed!\n", __func__); goto error; diff --git a/sys/dev/fdc/fdc_pccard.c b/sys/dev/fdc/fdc_pccard.c index 6197dcd6fe58..e04513f55b31 100644 --- a/sys/dev/fdc/fdc_pccard.c +++ b/sys/dev/fdc/fdc_pccard.c @@ -56,8 +56,7 @@ fdc_pccard_alloc_resources(device_t dev, struct fdc_data *fdc) int rid, i; rid = 0; - res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul, 1, - RF_ACTIVE); + res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE); if (res == NULL) { device_printf(dev, "cannot alloc I/O port range\n"); return (ENXIO); diff --git a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c index 3facf803064f..5dbf33d9ddff 100644 --- a/sys/dev/hpt27xx/hpt27xx_osm_bsd.c +++ b/sys/dev/hpt27xx/hpt27xx_osm_bsd.c @@ -1260,8 +1260,8 @@ static void hpt_final_init(void *dummy) for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; diff --git a/sys/dev/hptiop/hptiop.c b/sys/dev/hptiop/hptiop.c index 1a2bd2024744..204e13d3cc01 100644 --- a/sys/dev/hptiop/hptiop.c +++ b/sys/dev/hptiop/hptiop.c @@ -2052,8 +2052,8 @@ static int hptiop_attach(device_t dev) xpt_action((union ccb *)&ccb); rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ, - &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) { + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, 
SYS_RES_IRQ, + &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { device_printf(dev, "allocate irq failed!\n"); goto free_hba_path; } diff --git a/sys/dev/hptmv/entry.c b/sys/dev/hptmv/entry.c index 8eabb2d25974..83cda81f022e 100644 --- a/sys/dev/hptmv/entry.c +++ b/sys/dev/hptmv/entry.c @@ -1990,7 +1990,7 @@ hpt_attach(device_t dev) return rid; rid = 0; - if ((pAdapter->hpt_irq = bus_alloc_resource(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((pAdapter->hpt_irq = bus_alloc_resource_any(pAdapter->hpt_dev, SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { hpt_printk(("can't allocate interrupt\n")); return(ENXIO); diff --git a/sys/dev/hptnr/hptnr_osm_bsd.c b/sys/dev/hptnr/hptnr_osm_bsd.c index de5ce8bc9e43..214303d3c638 100644 --- a/sys/dev/hptnr/hptnr_osm_bsd.c +++ b/sys/dev/hptnr/hptnr_osm_bsd.c @@ -1445,8 +1445,8 @@ static void hpt_final_init(void *dummy) for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; diff --git a/sys/dev/hptrr/hptrr_osm_bsd.c b/sys/dev/hptrr/hptrr_osm_bsd.c index ad7d79e34664..b5e718a55c11 100644 --- a/sys/dev/hptrr/hptrr_osm_bsd.c +++ b/sys/dev/hptrr/hptrr_osm_bsd.c @@ -1093,8 +1093,8 @@ static void hpt_final_init(void *dummy) for (hba = vbus_ext->hba_list; hba; hba = hba->next) { int rid = 0; - if ((hba->irq_res = bus_alloc_resource(hba->pcidev, - SYS_RES_IRQ, &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) + if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, + SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) { os_printk("can't allocate interrupt"); return ; diff --git a/sys/dev/isci/isci.c b/sys/dev/isci/isci.c index 5a1066c81a74..913952360670 100644 --- a/sys/dev/isci/isci.c 
+++ b/sys/dev/isci/isci.c @@ -138,8 +138,8 @@ isci_allocate_pci_memory(struct isci_softc *isci) struct ISCI_PCI_BAR *pci_bar = &isci->pci_bar[i]; pci_bar->resource_id = PCIR_BAR(i*2); - pci_bar->resource = bus_alloc_resource(isci->device, - SYS_RES_MEMORY, &pci_bar->resource_id, 0, ~0, 1, + pci_bar->resource = bus_alloc_resource_any(isci->device, + SYS_RES_MEMORY, &pci_bar->resource_id, RF_ACTIVE); if(pci_bar->resource == NULL) diff --git a/sys/dev/ixgb/if_ixgb.c b/sys/dev/ixgb/if_ixgb.c index 6f25c0ae44d2..4ef4929253bc 100644 --- a/sys/dev/ixgb/if_ixgb.c +++ b/sys/dev/ixgb/if_ixgb.c @@ -1243,8 +1243,8 @@ ixgb_allocate_pci_resources(struct adapter * adapter) device_t dev = adapter->dev; rid = IXGB_MMBA; - adapter->res_memory = bus_alloc_resource(dev, SYS_RES_MEMORY, - &rid, 0, ~0, 1, + adapter->res_memory = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &rid, RF_ACTIVE); if (!(adapter->res_memory)) { device_printf(dev, "Unable to allocate bus resource: memory\n"); @@ -1257,9 +1257,9 @@ ixgb_allocate_pci_resources(struct adapter * adapter) adapter->hw.hw_addr = (uint8_t *) & adapter->osdep.mem_bus_space_handle; rid = 0x0; - adapter->res_interrupt = bus_alloc_resource(dev, SYS_RES_IRQ, - &rid, 0, ~0, 1, - RF_SHAREABLE | RF_ACTIVE); + adapter->res_interrupt = bus_alloc_resource_any(dev, SYS_RES_IRQ, + &rid, + RF_SHAREABLE | RF_ACTIVE); if (!(adapter->res_interrupt)) { device_printf(dev, "Unable to allocate bus resource: interrupt\n"); diff --git a/sys/dev/lmc/if_lmc.c b/sys/dev/lmc/if_lmc.c index 3ca3d3e280fb..fe22edfe2466 100644 --- a/sys/dev/lmc/if_lmc.c +++ b/sys/dev/lmc/if_lmc.c @@ -4510,8 +4510,8 @@ fbsd_attach(device_t dev) sc->csr_res_id = TLP_CBMA; sc->csr_res_type = SYS_RES_MEMORY; # endif - sc->csr_res = bus_alloc_resource(dev, sc->csr_res_type, &sc->csr_res_id, - 0, ~0, 1, RF_ACTIVE); + sc->csr_res = bus_alloc_resource_any(dev, sc->csr_res_type, &sc->csr_res_id, + RF_ACTIVE); if (sc->csr_res == NULL) { printf("%s: bus_alloc_resource(csr) failed.\n", 
NAME_UNIT); @@ -4522,8 +4522,8 @@ fbsd_attach(device_t dev) /* Allocate PCI interrupt resources for the card. */ sc->irq_res_id = 0; - sc->irq_res = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irq_res_id, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irq_res_id, + RF_ACTIVE | RF_SHAREABLE); if (sc->irq_res == NULL) { printf("%s: bus_alloc_resource(irq) failed.\n", NAME_UNIT); diff --git a/sys/dev/mrsas/mrsas.c b/sys/dev/mrsas/mrsas.c index a11c5e9eea34..ca8c48070e3e 100644 --- a/sys/dev/mrsas/mrsas.c +++ b/sys/dev/mrsas/mrsas.c @@ -833,8 +833,8 @@ mrsas_attach(device_t dev) bar = pci_read_config(dev, MRSAS_PCI_BAR1, 4); sc->reg_res_id = MRSAS_PCI_BAR1;/* BAR1 offset */ - if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { device_printf(dev, "Cannot allocate PCI registers\n"); goto attach_fail; diff --git a/sys/dev/mxge/if_mxge.c b/sys/dev/mxge/if_mxge.c index ba2af0c26d35..928917f9cf8f 100644 --- a/sys/dev/mxge/if_mxge.c +++ b/sys/dev/mxge/if_mxge.c @@ -4661,8 +4661,8 @@ mxge_add_single_irq(mxge_softc_t *sc) rid = 0; sc->legacy_irq = 1; } - sc->irq_res = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &rid, 0, ~0, - 1, RF_SHAREABLE | RF_ACTIVE); + sc->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &rid, + RF_SHAREABLE | RF_ACTIVE); if (sc->irq_res == NULL) { device_printf(sc->dev, "could not alloc interrupt\n"); return ENXIO; @@ -4813,8 +4813,8 @@ mxge_attach(device_t dev) /* Map the board into the kernel */ rid = PCIR_BARS; - sc->mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, - ~0, 1, RF_ACTIVE); + sc->mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE); if (sc->mem_res == NULL) { device_printf(dev, "could not map memory\n"); err = ENXIO; diff --git a/sys/dev/nvme/nvme_ctrlr.c b/sys/dev/nvme/nvme_ctrlr.c index 
4c8adc02f409..2c8a5649ad1b 100644 --- a/sys/dev/nvme/nvme_ctrlr.c +++ b/sys/dev/nvme/nvme_ctrlr.c @@ -52,8 +52,8 @@ nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) ctrlr->resource_id = PCIR_BAR(0); - ctrlr->resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY, - &ctrlr->resource_id, 0, ~0, 1, RF_ACTIVE); + ctrlr->resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->resource_id, RF_ACTIVE); if(ctrlr->resource == NULL) { nvme_printf(ctrlr, "unable to allocate pci resource\n"); @@ -72,8 +72,8 @@ nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr) * bus_alloc_resource() will just return NULL which is OK. */ ctrlr->bar4_resource_id = PCIR_BAR(4); - ctrlr->bar4_resource = bus_alloc_resource(ctrlr->dev, SYS_RES_MEMORY, - &ctrlr->bar4_resource_id, 0, ~0, 1, RF_ACTIVE); + ctrlr->bar4_resource = bus_alloc_resource_any(ctrlr->dev, SYS_RES_MEMORY, + &ctrlr->bar4_resource_id, RF_ACTIVE); return (0); } diff --git a/sys/dev/quicc/quicc_core.c b/sys/dev/quicc/quicc_core.c index 16525d63e28a..013b24260aa9 100644 --- a/sys/dev/quicc/quicc_core.c +++ b/sys/dev/quicc/quicc_core.c @@ -110,8 +110,8 @@ quicc_bfe_attach(device_t dev) * Re-allocate. We expect that the softc contains the information * collected by quicc_bfe_probe() intact. 
*/ - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, &sc->sc_rrid, - 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, + RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); @@ -228,13 +228,13 @@ quicc_bfe_probe(device_t dev, u_int clock) sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_MEMORY; - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, &sc->sc_rrid, - 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, &sc->sc_rrid, + RF_ACTIVE); if (sc->sc_rres == NULL) { sc->sc_rrid = 0; sc->sc_rtype = SYS_RES_IOPORT; - sc->sc_rres = bus_alloc_resource(dev, sc->sc_rtype, - &sc->sc_rrid, 0, ~0, 0, RF_ACTIVE); + sc->sc_rres = bus_alloc_resource_any(dev, sc->sc_rtype, + &sc->sc_rrid, RF_ACTIVE); if (sc->sc_rres == NULL) return (ENXIO); } diff --git a/sys/dev/sound/pci/envy24.c b/sys/dev/sound/pci/envy24.c index 427f507cec71..c4eaa10557f8 100644 --- a/sys/dev/sound/pci/envy24.c +++ b/sys/dev/sound/pci/envy24.c @@ -2482,17 +2482,17 @@ envy24_alloc_resource(struct sc_info *sc) { /* allocate I/O port resource */ sc->csid = PCIR_CCS; - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->csid, RF_ACTIVE); sc->ddmaid = PCIR_DDMA; - sc->ddma = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->ddmaid, 0, ~0, 1, RF_ACTIVE); + sc->ddma = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->ddmaid, RF_ACTIVE); sc->dsid = PCIR_DS; - sc->ds = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->dsid, 0, ~0, 1, RF_ACTIVE); + sc->ds = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->dsid, RF_ACTIVE); sc->mtid = PCIR_MT; - sc->mt = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->mtid, 0, ~0, 1, RF_ACTIVE); + sc->mt = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->mtid, RF_ACTIVE); if (!sc->cs || !sc->ddma || !sc->ds || !sc->mt) { device_printf(sc->dev, "unable to map IO port space\n"); return ENXIO; @@ 
-2516,8 +2516,8 @@ envy24_alloc_resource(struct sc_info *sc) /* allocate interrupt resource */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(sc->dev, sc->irq, INTR_MPSAFE, envy24_intr, sc, &sc->ih)) { device_printf(sc->dev, "unable to map interrupt\n"); diff --git a/sys/dev/sound/pci/envy24ht.c b/sys/dev/sound/pci/envy24ht.c index efad4b1f8621..85a36c2cfeb2 100644 --- a/sys/dev/sound/pci/envy24ht.c +++ b/sys/dev/sound/pci/envy24ht.c @@ -2400,11 +2400,11 @@ envy24ht_alloc_resource(struct sc_info *sc) { /* allocate I/O port resource */ sc->csid = PCIR_CCS; - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->csid, RF_ACTIVE); sc->mtid = ENVY24HT_PCIR_MT; - sc->mt = bus_alloc_resource(sc->dev, SYS_RES_IOPORT, - &sc->mtid, 0, ~0, 1, RF_ACTIVE); + sc->mt = bus_alloc_resource_any(sc->dev, SYS_RES_IOPORT, + &sc->mtid, RF_ACTIVE); if (!sc->cs || !sc->mt) { device_printf(sc->dev, "unable to map IO port space\n"); return ENXIO; @@ -2422,8 +2422,8 @@ envy24ht_alloc_resource(struct sc_info *sc) /* allocate interrupt resource */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(sc->dev, sc->irq, INTR_MPSAFE, envy24ht_intr, sc, &sc->ih)) { device_printf(sc->dev, "unable to map interrupt\n"); diff --git a/sys/dev/sound/pci/hdspe.c b/sys/dev/sound/pci/hdspe.c index 8258afceb152..06411a1917d1 100644 --- a/sys/dev/sound/pci/hdspe.c +++ b/sys/dev/sound/pci/hdspe.c @@ -128,8 +128,8 @@ hdspe_alloc_resources(struct sc_info *sc) /* Allocate resource. 
*/ sc->csid = PCIR_BAR(0); - sc->cs = bus_alloc_resource(sc->dev, SYS_RES_MEMORY, - &sc->csid, 0, ~0, 1, RF_ACTIVE); + sc->cs = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY, + &sc->csid, RF_ACTIVE); if (!sc->cs) { device_printf(sc->dev, "Unable to map SYS_RES_MEMORY.\n"); @@ -141,8 +141,8 @@ hdspe_alloc_resources(struct sc_info *sc) /* Allocate interrupt resource. */ sc->irqid = 0; - sc->irq = bus_alloc_resource(sc->dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || bus_setup_intr(sc->dev, sc->irq, INTR_MPSAFE | INTR_TYPE_AV, diff --git a/sys/dev/sound/pci/vibes.c b/sys/dev/sound/pci/vibes.c index 2c7453604471..4d6be306650d 100644 --- a/sys/dev/sound/pci/vibes.c +++ b/sys/dev/sound/pci/vibes.c @@ -759,8 +759,8 @@ sv_attach(device_t dev) { /* Register IRQ handler */ sc->irqid = 0; - sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &sc->irqid, - 0, ~0, 1, RF_ACTIVE | RF_SHAREABLE); + sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid, + RF_ACTIVE | RF_SHAREABLE); if (!sc->irq || snd_setup_intr(dev, sc->irq, 0, sv_intr, sc, &sc->ih)) { device_printf(dev, "sv_attach: Unable to map interrupt\n"); diff --git a/sys/dev/twa/tw_osl_freebsd.c b/sys/dev/twa/tw_osl_freebsd.c index dbd249a8d7a9..1bb7574c0820 100644 --- a/sys/dev/twa/tw_osl_freebsd.c +++ b/sys/dev/twa/tw_osl_freebsd.c @@ -338,8 +338,8 @@ twa_attach(device_t dev) return(error); } sc->reg_res_id = PCIR_BARS + bar0_offset; - if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, @@ -355,8 +355,8 @@ twa_attach(device_t dev) /* Allocate and register our interrupt. 
*/ sc->irq_res_id = 0; - if ((sc->irq_res = bus_alloc_resource(sc->bus_dev, SYS_RES_IRQ, - &(sc->irq_res_id), 0, ~0, 1, + if ((sc->irq_res = bus_alloc_resource_any(sc->bus_dev, SYS_RES_IRQ, + &(sc->irq_res_id), RF_SHAREABLE | RF_ACTIVE)) == NULL) { tw_osli_printf(sc, "error = %d", TW_CL_SEVERITY_ERROR_STRING, diff --git a/sys/dev/tws/tws.c b/sys/dev/tws/tws.c index 4c3529ed8c67..80726a7a6130 100644 --- a/sys/dev/tws/tws.c +++ b/sys/dev/tws/tws.c @@ -245,8 +245,8 @@ tws_attach(device_t dev) /* allocate MMIO register space */ sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */ - if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY, - &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) + if ((sc->reg_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, + &(sc->reg_res_id), RF_ACTIVE)) == NULL) { tws_log(sc, ALLOC_MEMORY_RES); goto attach_fail_1; diff --git a/sys/isa/isa_common.c b/sys/isa/isa_common.c index 7c114594f56f..bb03ea0dd651 100644 --- a/sys/isa/isa_common.c +++ b/sys/isa/isa_common.c @@ -150,8 +150,8 @@ isa_find_memory(device_t child, struct isa_config *config, start += MAX(align, 1)) { bus_set_resource(child, SYS_RES_MEMORY, i, start, size); - res[i] = bus_alloc_resource(child, - SYS_RES_MEMORY, &i, 0, ~0, 1, + res[i] = bus_alloc_resource_any(child, + SYS_RES_MEMORY, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_mem[i].ir_start = start; @@ -224,8 +224,8 @@ isa_find_port(device_t child, struct isa_config *config, start += align) { bus_set_resource(child, SYS_RES_IOPORT, i, start, size); - res[i] = bus_alloc_resource(child, - SYS_RES_IOPORT, &i, 0, ~0, 1, + res[i] = bus_alloc_resource_any(child, + SYS_RES_IOPORT, &i, rman_make_alignment_flags(align) /* !RF_ACTIVE */); if (res[i]) { result->ic_port[i].ir_start = start; diff --git a/sys/isa/vga_isa.c b/sys/isa/vga_isa.c index bf7f9e009c41..455760d0f53b 100644 --- a/sys/isa/vga_isa.c +++ b/sys/isa/vga_isa.c @@ -195,11 +195,11 @@ isavga_attach(device_t dev) sc = device_get_softc(dev); rid = 
0; - bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, - 0, ~0, 0, RF_ACTIVE | RF_SHAREABLE); + bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, + RF_ACTIVE | RF_SHAREABLE); rid = 0; - bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, - 0, ~0, 0, RF_ACTIVE | RF_SHAREABLE); + bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, + RF_ACTIVE | RF_SHAREABLE); error = vga_attach_unit(unit, sc, device_get_flags(dev)); if (error) diff --git a/sys/mips/sibyte/ata_zbbus.c b/sys/mips/sibyte/ata_zbbus.c index 87a2a42943e9..7f4706300f6d 100644 --- a/sys/mips/sibyte/ata_zbbus.c +++ b/sys/mips/sibyte/ata_zbbus.c @@ -67,7 +67,7 @@ ata_zbbus_attach(device_t dev) ch->attached = 1; rid = 0; - io = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1, RF_ACTIVE); + io = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); if (io == NULL) return (ENXIO); -- cgit v1.2.3 From ad73b301735baec2ce4729a1bedbdad932156d22 Mon Sep 17 00:00:00 2001 From: Adrian Chadd Date: Fri, 19 Feb 2016 05:02:17 +0000 Subject: document some ACPI related sysctls. 
Submitted by: Oliver Pinter Sponsored by: HardenedBSD Differential Revision: https://reviews.freebsd.org/D5263 --- sys/dev/acpica/acpi.c | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 686bbc834b77..4565b5b55e6b 100644 --- a/sys/dev/acpica/acpi.c +++ b/sys/dev/acpica/acpi.c @@ -558,16 +558,20 @@ acpi_attach(device_t dev) device_get_name(dev), CTLFLAG_RD, 0, ""); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "supported_sleep_state", CTLTYPE_STRING | CTLFLAG_RD, - 0, 0, acpi_supported_sleep_state_sysctl, "A", ""); + 0, 0, acpi_supported_sleep_state_sysctl, "A", + "List supported ACPI sleep states."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "power_button_state", CTLTYPE_STRING | CTLFLAG_RW, - &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); + &sc->acpi_power_button_sx, 0, acpi_sleep_state_sysctl, "A", + "Power button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "sleep_button_state", CTLTYPE_STRING | CTLFLAG_RW, - &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", ""); + &sc->acpi_sleep_button_sx, 0, acpi_sleep_state_sysctl, "A", + "Sleep button ACPI sleep state."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "lid_switch_state", CTLTYPE_STRING | CTLFLAG_RW, - &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", ""); + &sc->acpi_lid_switch_sx, 0, acpi_sleep_state_sysctl, "A", + "Lid ACPI sleep state. 
Set to S3 if you want to suspend your laptop when close the Lid."); SYSCTL_ADD_PROC(&sc->acpi_sysctl_ctx, SYSCTL_CHILDREN(sc->acpi_sysctl_tree), OID_AUTO, "standby_state", CTLTYPE_STRING | CTLFLAG_RW, &sc->acpi_standby_sx, 0, acpi_sleep_state_sysctl, "A", ""); -- cgit v1.2.3 From 5788ded618f6e13ad9d4f83291a06eb59f2bd4a7 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Fri, 19 Feb 2016 05:03:17 +0000 Subject: hyperv/hn: Add option to bind TX taskqueues to the specified CPU It will be used to help tracking host side transmission ring selection issue; and it will be turned on by default, once we have concrete result. Reviewed by: adrian, Jun Su Approved by: adrian (mento) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5316 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 33 ++++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 3 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 9a2b2f74a4ad..483354697618 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -269,6 +269,10 @@ static int hn_use_txdesc_bufring = 1; SYSCTL_INT(_hw_hn, OID_AUTO, use_txdesc_bufring, CTLFLAG_RD, &hn_use_txdesc_bufring, 0, "Use buf_ring for TX descriptors"); +static int hn_bind_tx_taskq = -1; +SYSCTL_INT(_hw_hn, OID_AUTO, bind_tx_taskq, CTLFLAG_RDTUN, + &hn_bind_tx_taskq, 0, "Bind TX taskqueue to the specified cpu"); + /* * Forward declarations */ @@ -383,8 +387,20 @@ netvsc_attach(device_t dev) if (hn_tx_taskq == NULL) { sc->hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &sc->hn_tx_taskq); - taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, "%s tx", - device_get_nameunit(dev)); + if (hn_bind_tx_taskq >= 0) { + int cpu = hn_bind_tx_taskq; + cpuset_t cpu_set; + + if (cpu > mp_ncpus - 1) + cpu = mp_ncpus - 1; + CPU_SETOF(cpu, &cpu_set); + 
taskqueue_start_threads_cpuset(&sc->hn_tx_taskq, 1, + PI_NET, &cpu_set, "%s tx", + device_get_nameunit(dev)); + } else { + taskqueue_start_threads(&sc->hn_tx_taskq, 1, PI_NET, + "%s tx", device_get_nameunit(dev)); + } } else { sc->hn_tx_taskq = hn_tx_taskq; } @@ -2409,7 +2425,18 @@ hn_tx_taskq_create(void *arg __unused) hn_tx_taskq = taskqueue_create("hn_tx", M_WAITOK, taskqueue_thread_enqueue, &hn_tx_taskq); - taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); + if (hn_bind_tx_taskq >= 0) { + int cpu = hn_bind_tx_taskq; + cpuset_t cpu_set; + + if (cpu > mp_ncpus - 1) + cpu = mp_ncpus - 1; + CPU_SETOF(cpu, &cpu_set); + taskqueue_start_threads_cpuset(&hn_tx_taskq, 1, PI_NET, + &cpu_set, "hn tx"); + } else { + taskqueue_start_threads(&hn_tx_taskq, 1, PI_NET, "hn tx"); + } } SYSINIT(hn_txtq_create, SI_SUB_DRIVERS, SI_ORDER_FIRST, hn_tx_taskq_create, NULL); -- cgit v1.2.3 From ea2f3d177611103aad2acc76268535cc7e7fbf2b Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Fri, 19 Feb 2016 05:08:44 +0000 Subject: hyperv/hn: Enable IP header checksum offloading for WIN8 (WinServ2012) Tested on Windows Server 2012. Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5317 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 483354697618..ee73a66f2556 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -176,7 +176,7 @@ struct hn_txdesc { * later. UDP checksum offloading doesn't work on earlier * Windows releases. 
*/ -#define HN_CSUM_ASSIST_WIN8 (CSUM_TCP) +#define HN_CSUM_ASSIST_WIN8 (CSUM_IP | CSUM_TCP) #define HN_CSUM_ASSIST (CSUM_IP | CSUM_UDP | CSUM_TCP) #define HN_LRO_LENLIM_DEF (25 * ETHERMTU) -- cgit v1.2.3 From 563d00ab5ca4ff251884144ce52049b41b8654f4 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Fri, 19 Feb 2016 05:13:56 +0000 Subject: hyperv/hn: Free the txdesc buf_ring when the TX ring is destroyed Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5318 --- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index ee73a66f2556..14e44c2091b1 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -2276,6 +2276,11 @@ hn_destroy_tx_ring(struct hn_tx_ring *txr) bus_dma_tag_destroy(txr->hn_tx_data_dtag); if (txr->hn_tx_rndis_dtag != NULL) bus_dma_tag_destroy(txr->hn_tx_rndis_dtag); + +#ifdef HN_USE_TXDESC_BUFRING + buf_ring_free(txr->hn_txdesc_br, M_NETVSC); +#endif + free(txr->hn_txdesc, M_NETVSC); txr->hn_txdesc = NULL; -- cgit v1.2.3 From 665d5ae9a246f886344bfe9f7b8bd5af312ddfe2 Mon Sep 17 00:00:00 2001 From: Andriy Voskoboinyk Date: Fri, 19 Feb 2016 05:59:38 +0000 Subject: net80211: add few missing subtype names. - Add definitions for Timing Advertisement and Control Wrapper frames. - Refresh ieee80211_mgt_subtype_name and ieee80211_ctl_subtype_name arrays. - Count Timing Advertisement frames as discarded management frames in all modes. 
Approved by: adrian (mentor) Differential Revision: https://reviews.freebsd.org/D5331 --- sys/net80211/ieee80211.h | 2 ++ sys/net80211/ieee80211_adhoc.c | 2 ++ sys/net80211/ieee80211_hostap.c | 1 + sys/net80211/ieee80211_mesh.c | 1 + sys/net80211/ieee80211_proto.c | 6 +++--- sys/net80211/ieee80211_sta.c | 1 + sys/net80211/ieee80211_wds.c | 1 + 7 files changed, 11 insertions(+), 3 deletions(-) diff --git a/sys/net80211/ieee80211.h b/sys/net80211/ieee80211.h index 5bec9af3ea65..925b451e455e 100644 --- a/sys/net80211/ieee80211.h +++ b/sys/net80211/ieee80211.h @@ -129,6 +129,7 @@ struct ieee80211_qosframe_addr4 { #define IEEE80211_FC0_SUBTYPE_REASSOC_RESP 0x30 #define IEEE80211_FC0_SUBTYPE_PROBE_REQ 0x40 #define IEEE80211_FC0_SUBTYPE_PROBE_RESP 0x50 +#define IEEE80211_FC0_SUBTYPE_TIMING_ADV 0x60 #define IEEE80211_FC0_SUBTYPE_BEACON 0x80 #define IEEE80211_FC0_SUBTYPE_ATIM 0x90 #define IEEE80211_FC0_SUBTYPE_DISASSOC 0xa0 @@ -137,6 +138,7 @@ struct ieee80211_qosframe_addr4 { #define IEEE80211_FC0_SUBTYPE_ACTION 0xd0 #define IEEE80211_FC0_SUBTYPE_ACTION_NOACK 0xe0 /* for TYPE_CTL */ +#define IEEE80211_FC0_SUBTYPE_CONTROL_WRAP 0x70 #define IEEE80211_FC0_SUBTYPE_BAR 0x80 #define IEEE80211_FC0_SUBTYPE_BA 0x90 #define IEEE80211_FC0_SUBTYPE_PS_POLL 0xa0 diff --git a/sys/net80211/ieee80211_adhoc.c b/sys/net80211/ieee80211_adhoc.c index d31ad9be0de6..aa568b69c0bf 100644 --- a/sys/net80211/ieee80211_adhoc.c +++ b/sys/net80211/ieee80211_adhoc.c @@ -899,6 +899,7 @@ adhoc_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: + case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: @@ -941,6 +942,7 @@ ahdemo_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: + 
case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_BEACON: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: diff --git a/sys/net80211/ieee80211_hostap.c b/sys/net80211/ieee80211_hostap.c index 34d1b4ef5bb6..2dd1f6895120 100644 --- a/sys/net80211/ieee80211_hostap.c +++ b/sys/net80211/ieee80211_hostap.c @@ -2226,6 +2226,7 @@ hostap_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: + case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); diff --git a/sys/net80211/ieee80211_mesh.c b/sys/net80211/ieee80211_mesh.c index 1778c091adc7..40253c0247e1 100644 --- a/sys/net80211/ieee80211_mesh.c +++ b/sys/net80211/ieee80211_mesh.c @@ -2093,6 +2093,7 @@ mesh_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, case IEEE80211_FC0_SUBTYPE_ASSOC_RESP: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: + case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: case IEEE80211_FC0_SUBTYPE_AUTH: diff --git a/sys/net80211/ieee80211_proto.c b/sys/net80211/ieee80211_proto.c index 1b8b5249946a..c2b50af147b8 100644 --- a/sys/net80211/ieee80211_proto.c +++ b/sys/net80211/ieee80211_proto.c @@ -64,14 +64,14 @@ __FBSDID("$FreeBSD$"); const char *ieee80211_mgt_subtype_name[] = { "assoc_req", "assoc_resp", "reassoc_req", "reassoc_resp", - "probe_req", "probe_resp", "reserved#6", "reserved#7", + "probe_req", "probe_resp", "timing_adv", "reserved#7", "beacon", "atim", "disassoc", "auth", "deauth", "action", "action_noack", "reserved#15" }; const char *ieee80211_ctl_subtype_name[] = { "reserved#0", "reserved#1", "reserved#2", "reserved#3", - "reserved#3", "reserved#5", "reserved#6", "reserved#7", - "reserved#8", "reserved#9", "ps_poll", "rts", + "reserved#4", "reserved#5", "reserved#6", 
"control_wrap", + "bar", "ba", "ps_poll", "rts", "cts", "ack", "cf_end", "cf_end_ack" }; const char *ieee80211_opmode_name[IEEE80211_OPMODE_MAX] = { diff --git a/sys/net80211/ieee80211_sta.c b/sys/net80211/ieee80211_sta.c index 81d75596b81b..0c0296a1ce43 100644 --- a/sys/net80211/ieee80211_sta.c +++ b/sys/net80211/ieee80211_sta.c @@ -1851,6 +1851,7 @@ sta_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, case IEEE80211_FC0_SUBTYPE_ASSOC_REQ: case IEEE80211_FC0_SUBTYPE_REASSOC_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: + case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_ATIM: IEEE80211_DISCARD(vap, IEEE80211_MSG_INPUT, wh, NULL, "%s", "not handled"); diff --git a/sys/net80211/ieee80211_wds.c b/sys/net80211/ieee80211_wds.c index a444b790bb7b..a9384d426fae 100644 --- a/sys/net80211/ieee80211_wds.c +++ b/sys/net80211/ieee80211_wds.c @@ -782,6 +782,7 @@ wds_recv_mgmt(struct ieee80211_node *ni, struct mbuf *m0, int subtype, case IEEE80211_FC0_SUBTYPE_REASSOC_RESP: case IEEE80211_FC0_SUBTYPE_PROBE_REQ: case IEEE80211_FC0_SUBTYPE_PROBE_RESP: + case IEEE80211_FC0_SUBTYPE_TIMING_ADV: case IEEE80211_FC0_SUBTYPE_BEACON: case IEEE80211_FC0_SUBTYPE_ATIM: case IEEE80211_FC0_SUBTYPE_DISASSOC: -- cgit v1.2.3 From d931334bd47e537c6c173a0bc4d9e67301a91d40 Mon Sep 17 00:00:00 2001 From: Marcelo Araujo Date: Fri, 19 Feb 2016 06:35:53 +0000 Subject: Fix regression introduced on 272446r. lagg(4) supports the protocol none, where it disables any traffic without disabling the lagg(4) interface itself. 
PR: 206921 Submitted by: Pushkar Kothavade Reviewed by: rpokala Approved by: bapt (mentor) MFC after: 3 weeks Sponsored by: gandi.net Differential Revision: https://reviews.freebsd.org/D5076 --- sys/net/if_lagg.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/net/if_lagg.c b/sys/net/if_lagg.c index e1e1837f3d7c..04771229b73f 100644 --- a/sys/net/if_lagg.c +++ b/sys/net/if_lagg.c @@ -1260,7 +1260,7 @@ lagg_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data) error = priv_check(td, PRIV_NET_LAGG); if (error) break; - if (ra->ra_proto < 1 || ra->ra_proto >= LAGG_PROTO_MAX) { + if (ra->ra_proto >= LAGG_PROTO_MAX) { error = EPROTONOSUPPORT; break; } -- cgit v1.2.3 From 3ef58843aa512a737604b874299b7a7aa889547b Mon Sep 17 00:00:00 2001 From: Kevin Lo Date: Fri, 19 Feb 2016 06:50:00 +0000 Subject: Remove sys/types.h --- lib/libc/gen/directory.3 | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/lib/libc/gen/directory.3 b/lib/libc/gen/directory.3 index f0d0f4b97f0d..39198056ebb6 100644 --- a/lib/libc/gen/directory.3 +++ b/lib/libc/gen/directory.3 @@ -28,7 +28,7 @@ .\" @(#)directory.3 8.1 (Berkeley) 6/4/93 .\" $FreeBSD$ .\" -.Dd May 6, 2015 +.Dd February 19, 2016 .Dt DIRECTORY 3 .Os .Sh NAME @@ -46,7 +46,6 @@ .Sh LIBRARY .Lb libc .Sh SYNOPSIS -.In sys/types.h .In dirent.h .Ft DIR * .Fn opendir "const char *filename" -- cgit v1.2.3 From 3e1cfddd479dab514620ed30a69ca536716c01ae Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 08:35:29 +0000 Subject: Rename pmap.h to pmap-v4.h and remove pmap-v6.h include from it. Create new pmap.h which includes specific header according to __ARM_ARCH. Note that is included from so one common must exist. 
--- sys/arm/include/pmap-v4.h | 541 ++++++++++++++++++++++++++++++++++++++++++++++ sys/arm/include/pmap-v6.h | 6 +- sys/arm/include/pmap.h | 522 +------------------------------------------- 3 files changed, 552 insertions(+), 517 deletions(-) create mode 100644 sys/arm/include/pmap-v4.h diff --git a/sys/arm/include/pmap-v4.h b/sys/arm/include/pmap-v4.h new file mode 100644 index 000000000000..5b7f3a1031c6 --- /dev/null +++ b/sys/arm/include/pmap-v4.h @@ -0,0 +1,541 @@ +/*- + * Copyright (c) 1991 Regents of the University of California. + * All rights reserved. + * + * This code is derived from software contributed to Berkeley by + * the Systems Programming Group of the University of Utah Computer + * Science Department and William Jolitz of UUNET Technologies Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the University of + * California, Berkeley and its contributors. + * 4. Neither the name of the University nor the names of its contributors + * may be used to endorse or promote products derived from this software + * without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * Derived from hp300 version by Mike Hibler, this version by William + * Jolitz uses a recursive map [a pde points to the page directory] to + * map the page tables using the pagetables themselves. This is done to + * reduce the impact on kernel virtual memory for lots of sparse address + * space, and to reduce the cost of memory to each process. + * + * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 + * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 + * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_PMAP_V4_H_ +#define _MACHINE_PMAP_V4_H_ + +#include +#include +/* + * Pte related macros + */ +#define PTE_NOCACHE 1 +#define PTE_CACHE 2 +#define PTE_DEVICE PTE_NOCACHE +#define PTE_PAGETABLE 3 + +enum mem_type { + STRONG_ORD = 0, + DEVICE_NOSHARE, + DEVICE_SHARE, + NRML_NOCACHE, + NRML_IWT_OWT, + NRML_IWB_OWB, + NRML_IWBA_OWBA +}; + +#ifndef LOCORE + +#include +#include +#include +#include + +#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ +#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ + +#ifdef _KERNEL + +#define vtophys(va) pmap_kextract((vm_offset_t)(va)) + +#endif + +#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) +#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) +#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) +void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); + +/* + * Pmap stuff + */ + +/* + * This 
structure is used to hold a virtual<->physical address + * association and is used mostly by bootstrap code + */ +struct pv_addr { + SLIST_ENTRY(pv_addr) pv_list; + vm_offset_t pv_va; + vm_paddr_t pv_pa; +}; + +struct pv_entry; +struct pv_chunk; + +struct md_page { + int pvh_attrs; + vm_memattr_t pv_memattr; + vm_offset_t pv_kva; /* first kernel VA mapping */ + TAILQ_HEAD(,pv_entry) pv_list; +}; + +struct l1_ttable; +struct l2_dtable; + + +/* + * The number of L2 descriptor tables which can be tracked by an l2_dtable. + * A bucket size of 16 provides for 16MB of contiguous virtual address + * space per l2_dtable. Most processes will, therefore, require only two or + * three of these to map their whole working set. + */ +#define L2_BUCKET_LOG2 4 +#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) +/* + * Given the above "L2-descriptors-per-l2_dtable" constant, the number + * of l2_dtable structures required to track all possible page descriptors + * mappable by an L1 translation table is given by the following constants: + */ +#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) +#define L2_SIZE (1 << L2_LOG2) + +struct pmap { + struct mtx pm_mtx; + u_int8_t pm_domain; + struct l1_ttable *pm_l1; + struct l2_dtable *pm_l2[L2_SIZE]; + cpuset_t pm_active; /* active on cpus */ + struct pmap_statistics pm_stats; /* pmap statictics */ + TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ +}; + +typedef struct pmap *pmap_t; + +#ifdef _KERNEL +extern struct pmap kernel_pmap_store; +#define kernel_pmap (&kernel_pmap_store) + +#define PMAP_ASSERT_LOCKED(pmap) \ + mtx_assert(&(pmap)->pm_mtx, MA_OWNED) +#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) +#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) +#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ + NULL, MTX_DEF | MTX_DUPOK) +#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) +#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) +#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) +#define 
PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) +#endif + + +/* + * For each vm_page_t, there is a list of all currently valid virtual + * mappings of that page. An entry is a pv_entry_t, the list is pv_list. + */ +typedef struct pv_entry { + vm_offset_t pv_va; /* virtual address for mapping */ + TAILQ_ENTRY(pv_entry) pv_list; + int pv_flags; /* flags (wired, etc...) */ + pmap_t pv_pmap; /* pmap where mapping lies */ + TAILQ_ENTRY(pv_entry) pv_plist; +} *pv_entry_t; + +/* + * pv_entries are allocated in chunks per-process. This avoids the + * need to track per-pmap assignments. + */ +#define _NPCM 8 +#define _NPCPV 252 + +struct pv_chunk { + pmap_t pc_pmap; + TAILQ_ENTRY(pv_chunk) pc_list; + uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ + uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ + TAILQ_ENTRY(pv_chunk) pc_lru; + struct pv_entry pc_pventry[_NPCPV]; +}; + +#ifdef _KERNEL + +boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); + +/* + * virtual address to page table entry and + * to physical address. Likewise for alternate address space. + * Note: these work recursively, thus vtopte of a pte will give + * the corresponding pde that in turn maps it. + */ + +/* + * The current top of kernel VM. 
+ */ +extern vm_offset_t pmap_curmaxkvaddr; + +struct pcb; + +void pmap_set_pcb_pagedir(pmap_t, struct pcb *); +/* Virtual address to page table entry */ +static __inline pt_entry_t * +vtopte(vm_offset_t va) +{ + pd_entry_t *pdep; + pt_entry_t *ptep; + + if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) + return (NULL); + return (ptep); +} + +extern vm_paddr_t phys_avail[]; +extern vm_offset_t virtual_avail; +extern vm_offset_t virtual_end; + +void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); +int pmap_change_attr(vm_offset_t, vm_size_t, int); +void pmap_kenter(vm_offset_t va, vm_paddr_t pa); +void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); +void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); +void pmap_kremove_device(vm_offset_t, vm_size_t); +void *pmap_kenter_temporary(vm_paddr_t pa, int i); +void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); +vm_paddr_t pmap_kextract(vm_offset_t va); +vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); +void pmap_kremove(vm_offset_t); +void *pmap_mapdev(vm_offset_t, vm_size_t); +void pmap_unmapdev(vm_offset_t, vm_size_t); +vm_page_t pmap_use_pt(pmap_t, vm_offset_t); +void pmap_debug(int); +void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); +void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); +vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); +void +pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, + int cache); +int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); + +/* + * Definitions for MMU domains + */ +#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ +#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ + +/* + * The new pmap ensures that page-tables are always mapping Write-Thru. + * Thus, on some platforms we can run fast and loose and avoid syncing PTEs + * on every change. + * + * Unfortunately, not all CPUs have a write-through cache mode. 
So we + * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, + * and if there is the chance for PTE syncs to be needed, we define + * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) + * the code. + */ +extern int pmap_needs_pte_sync; + +/* + * These macros define the various bit masks in the PTE. + * + * We use these macros since we use different bits on different processor + * models. + */ + +#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) +#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ + L1_S_XSCALE_TEX(TEX_XSCALE_T)) + +#define L2_L_CACHE_MASK_generic (L2_B|L2_C) +#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ + L2_XSCALE_L_TEX(TEX_XSCALE_T)) + +#define L2_S_PROT_U_generic (L2_AP(AP_U)) +#define L2_S_PROT_W_generic (L2_AP(AP_W)) +#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) + +#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) +#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) +#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) + +#define L2_S_CACHE_MASK_generic (L2_B|L2_C) +#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ + L2_XSCALE_T_TEX(TEX_XSCALE_X)) + +#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) +#define L1_S_PROTO_xscale (L1_TYPE_S) + +#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) +#define L1_C_PROTO_xscale (L1_TYPE_C) + +#define L2_L_PROTO (L2_TYPE_L) + +#define L2_S_PROTO_generic (L2_TYPE_S) +#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) + +/* + * User-visible names for the ones that vary with MMU class. + */ +#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) + +#if ARM_NMMUS > 1 +/* More than one MMU class configured; use variables. 
*/ +#define L2_S_PROT_U pte_l2_s_prot_u +#define L2_S_PROT_W pte_l2_s_prot_w +#define L2_S_PROT_MASK pte_l2_s_prot_mask + +#define L1_S_CACHE_MASK pte_l1_s_cache_mask +#define L2_L_CACHE_MASK pte_l2_l_cache_mask +#define L2_S_CACHE_MASK pte_l2_s_cache_mask + +#define L1_S_PROTO pte_l1_s_proto +#define L1_C_PROTO pte_l1_c_proto +#define L2_S_PROTO pte_l2_s_proto + +#elif ARM_MMU_GENERIC != 0 +#define L2_S_PROT_U L2_S_PROT_U_generic +#define L2_S_PROT_W L2_S_PROT_W_generic +#define L2_S_PROT_MASK L2_S_PROT_MASK_generic + +#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic +#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic +#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic + +#define L1_S_PROTO L1_S_PROTO_generic +#define L1_C_PROTO L1_C_PROTO_generic +#define L2_S_PROTO L2_S_PROTO_generic + +#elif ARM_MMU_XSCALE == 1 +#define L2_S_PROT_U L2_S_PROT_U_xscale +#define L2_S_PROT_W L2_S_PROT_W_xscale +#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale + +#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale +#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale +#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale + +#define L1_S_PROTO L1_S_PROTO_xscale +#define L1_C_PROTO L1_C_PROTO_xscale +#define L2_S_PROTO L2_S_PROTO_xscale + +#endif /* ARM_NMMUS > 1 */ + +#if defined(CPU_XSCALE_81342) +#define PMAP_NEEDS_PTE_SYNC 1 +#define PMAP_INCLUDE_PTE_SYNC +#else +#define PMAP_NEEDS_PTE_SYNC 0 +#endif + +/* + * These macros return various bits based on kernel/user and protection. + * Note that the compiler will usually fold these at compile time. + */ +#define L1_S_PROT_U (L1_S_AP(AP_U)) +#define L1_S_PROT_W (L1_S_AP(AP_W)) +#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) +#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) + +#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ + (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) + +#define L2_L_PROT_U (L2_AP(AP_U)) +#define L2_L_PROT_W (L2_AP(AP_W)) +#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) + +#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? 
L2_L_PROT_U : 0) | \ + (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) + +#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ + (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) + +/* + * Macros to test if a mapping is mappable with an L1 Section mapping + * or an L2 Large Page mapping. + */ +#define L1_S_MAPPABLE_P(va, pa, size) \ + ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) + +#define L2_L_MAPPABLE_P(va, pa, size) \ + ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) + +/* + * Provide a fallback in case we were not able to determine it at + * compile-time. + */ +#ifndef PMAP_NEEDS_PTE_SYNC +#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync +#define PMAP_INCLUDE_PTE_SYNC +#endif + +#ifdef ARM_L2_PIPT +#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) +#else +#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) +#endif + +#define PTE_SYNC(pte) \ +do { \ + if (PMAP_NEEDS_PTE_SYNC) { \ + cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ + cpu_drain_writebuf(); \ + _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ + } else \ + cpu_drain_writebuf(); \ +} while (/*CONSTCOND*/0) + +#define PTE_SYNC_RANGE(pte, cnt) \ +do { \ + if (PMAP_NEEDS_PTE_SYNC) { \ + cpu_dcache_wb_range((vm_offset_t)(pte), \ + (cnt) << 2); /* * sizeof(pt_entry_t) */ \ + cpu_drain_writebuf(); \ + _sync_l2((vm_offset_t)(pte), \ + (cnt) << 2); /* * sizeof(pt_entry_t) */ \ + } else \ + cpu_drain_writebuf(); \ +} while (/*CONSTCOND*/0) + +extern pt_entry_t pte_l1_s_cache_mode; +extern pt_entry_t pte_l1_s_cache_mask; + +extern pt_entry_t pte_l2_l_cache_mode; +extern pt_entry_t pte_l2_l_cache_mask; + +extern pt_entry_t pte_l2_s_cache_mode; +extern pt_entry_t pte_l2_s_cache_mask; + +extern pt_entry_t pte_l1_s_cache_mode_pt; +extern pt_entry_t pte_l2_l_cache_mode_pt; +extern pt_entry_t pte_l2_s_cache_mode_pt; + +extern pt_entry_t pte_l2_s_prot_u; +extern pt_entry_t pte_l2_s_prot_w; +extern pt_entry_t pte_l2_s_prot_mask; + +extern 
pt_entry_t pte_l1_s_proto; +extern pt_entry_t pte_l1_c_proto; +extern pt_entry_t pte_l2_s_proto; + +extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); +extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, + vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); +extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); + +#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) +void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); +void pmap_zero_page_generic(vm_paddr_t, int, int); + +void pmap_pte_init_generic(void); +#endif /* ARM_MMU_GENERIC != 0 */ + +#if ARM_MMU_XSCALE == 1 +void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); +void pmap_zero_page_xscale(vm_paddr_t, int, int); + +void pmap_pte_init_xscale(void); + +void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); + +void pmap_use_minicache(vm_offset_t, vm_size_t); +#endif /* ARM_MMU_XSCALE == 1 */ +#if defined(CPU_XSCALE_81342) +#define ARM_HAVE_SUPERSECTIONS +#endif + +#define PTE_KERNEL 0 +#define PTE_USER 1 +#define l1pte_valid(pde) ((pde) != 0) +#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) +#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) +#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) + +#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) +#define l2pte_valid(pte) ((pte) != 0) +#define l2pte_pa(pte) ((pte) & L2_S_FRAME) +#define l2pte_minidata(pte) (((pte) & \ + (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ + == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) + +/* L1 and L2 page table macros */ +#define pmap_pde_v(pde) l1pte_valid(*(pde)) +#define pmap_pde_section(pde) l1pte_section_p(*(pde)) +#define pmap_pde_page(pde) l1pte_page_p(*(pde)) +#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) + +#define pmap_pte_v(pte) l2pte_valid(*(pte)) +#define pmap_pte_pa(pte) l2pte_pa(*(pte)) + +/* + * Flags that indicate attributes of pages or mappings of pages. 
+ * + * The PVF_MOD and PVF_REF flags are stored in the mdpage for each + * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual + * pv_entry's for each page. They live in the same "namespace" so + * that we can clear multiple attributes at a time. + * + * Note the "non-cacheable" flag generally means the page has + * multiple mappings in a given address space. + */ +#define PVF_MOD 0x01 /* page is modified */ +#define PVF_REF 0x02 /* page is referenced */ +#define PVF_WIRED 0x04 /* mapping is wired */ +#define PVF_WRITE 0x08 /* mapping is writable */ +#define PVF_EXEC 0x10 /* mapping is executable */ +#define PVF_NC 0x20 /* mapping is non-cacheable */ +#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ +#define PVF_UNMAN 0x80 /* mapping is unmanaged */ + +void vector_page_setprot(int); + +#define SECTION_CACHE 0x1 +#define SECTION_PT 0x2 +void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); +#ifdef ARM_HAVE_SUPERSECTIONS +void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); +#endif + +extern char *_tmppt; + +void pmap_postinit(void); + +extern vm_paddr_t dump_avail[]; +#endif /* _KERNEL */ + +#endif /* !LOCORE */ + +#endif /* !_MACHINE_PMAP_V4_H_ */ diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h index 87da59adde9e..bfc6860db0b1 100644 --- a/sys/arm/include/pmap-v6.h +++ b/sys/arm/include/pmap-v6.h @@ -45,8 +45,8 @@ * $FreeBSD$ */ -#ifndef _MACHINE_PMAP_H_ -#define _MACHINE_PMAP_H_ +#ifndef _MACHINE_PMAP_V6_H_ +#define _MACHINE_PMAP_V6_H_ #include #include @@ -220,4 +220,4 @@ void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t, vm_memattr_t); #endif /* _KERNEL */ -#endif /* !_MACHINE_PMAP_H_ */ +#endif /* !_MACHINE_PMAP_V6_H_ */ diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h index 8372929cb010..ec69077b15ba 100644 --- a/sys/arm/include/pmap.h +++ b/sys/arm/include/pmap.h @@ -1,11 +1,8 @@ /*- - * Copyright (c) 1991 Regents of the University of California. 
+ * Copyright (c) 2016 Svatopluk Kraus + * Copyright (c) 2016 Michal Meloun * All rights reserved. * - * This code is derived from software contributed to Berkeley by - * the Systems Programming Group of the University of Utah Computer - * Science Department and William Jolitz of UUNET Technologies Inc. - * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: @@ -14,18 +11,11 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the University of - * California, Berkeley and its contributors. - * 4. Neither the name of the University nor the names of its contributors - * may be used to endorse or promote products derived from this software - * without specific prior written permission. * - * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -34,514 +24,18 @@ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* - * Derived from hp300 version by Mike Hibler, this version by William - * Jolitz uses a recursive map [a pde points to the page directory] to - * map the page tables using the pagetables themselves. This is done to - * reduce the impact on kernel virtual memory for lots of sparse address - * space, and to reduce the cost of memory to each process. - * - * from: hp300: @(#)pmap.h 7.2 (Berkeley) 12/16/90 - * from: @(#)pmap.h 7.4 (Berkeley) 5/12/91 - * from: FreeBSD: src/sys/i386/include/pmap.h,v 1.70 2000/11/30 - * * $FreeBSD$ */ - #include - -#if __ARM_ARCH >= 6 -#include -#else /* __ARM_ARCH >= 6 */ #ifndef _MACHINE_PMAP_H_ #define _MACHINE_PMAP_H_ -#include -#include -/* - * Pte related macros - */ -#define PTE_NOCACHE 1 -#define PTE_CACHE 2 -#define PTE_DEVICE PTE_NOCACHE -#define PTE_PAGETABLE 3 - -enum mem_type { - STRONG_ORD = 0, - DEVICE_NOSHARE, - DEVICE_SHARE, - NRML_NOCACHE, - NRML_IWT_OWT, - NRML_IWB_OWB, - NRML_IWBA_OWBA -}; - -#ifndef LOCORE - -#include -#include -#include -#include - -#define PDESIZE sizeof(pd_entry_t) /* for assembly files */ -#define PTESIZE sizeof(pt_entry_t) /* for assembly files */ - -#ifdef _KERNEL - -#define vtophys(va) pmap_kextract((vm_offset_t)(va)) - -#endif - -#define pmap_page_get_memattr(m) ((m)->md.pv_memattr) -#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) -#define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) -void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); - -/* - * Pmap stuff - */ - -/* - * This structure is used to hold a virtual<->physical address - * association and is used mostly by bootstrap code - */ -struct pv_addr { - SLIST_ENTRY(pv_addr) pv_list; - vm_offset_t pv_va; - vm_paddr_t pv_pa; -}; - -struct pv_entry; -struct pv_chunk; - -struct md_page { - int pvh_attrs; - vm_memattr_t pv_memattr; - vm_offset_t pv_kva; /* first kernel VA mapping */ - TAILQ_HEAD(,pv_entry) pv_list; -}; - -struct l1_ttable; -struct l2_dtable; - - -/* - * The number of L2 descriptor 
tables which can be tracked by an l2_dtable. - * A bucket size of 16 provides for 16MB of contiguous virtual address - * space per l2_dtable. Most processes will, therefore, require only two or - * three of these to map their whole working set. - */ -#define L2_BUCKET_LOG2 4 -#define L2_BUCKET_SIZE (1 << L2_BUCKET_LOG2) -/* - * Given the above "L2-descriptors-per-l2_dtable" constant, the number - * of l2_dtable structures required to track all possible page descriptors - * mappable by an L1 translation table is given by the following constants: - */ -#define L2_LOG2 ((32 - L1_S_SHIFT) - L2_BUCKET_LOG2) -#define L2_SIZE (1 << L2_LOG2) - -struct pmap { - struct mtx pm_mtx; - u_int8_t pm_domain; - struct l1_ttable *pm_l1; - struct l2_dtable *pm_l2[L2_SIZE]; - cpuset_t pm_active; /* active on cpus */ - struct pmap_statistics pm_stats; /* pmap statictics */ - TAILQ_HEAD(,pv_entry) pm_pvlist; /* list of mappings in pmap */ -}; - -typedef struct pmap *pmap_t; - -#ifdef _KERNEL -extern struct pmap kernel_pmap_store; -#define kernel_pmap (&kernel_pmap_store) - -#define PMAP_ASSERT_LOCKED(pmap) \ - mtx_assert(&(pmap)->pm_mtx, MA_OWNED) -#define PMAP_LOCK(pmap) mtx_lock(&(pmap)->pm_mtx) -#define PMAP_LOCK_DESTROY(pmap) mtx_destroy(&(pmap)->pm_mtx) -#define PMAP_LOCK_INIT(pmap) mtx_init(&(pmap)->pm_mtx, "pmap", \ - NULL, MTX_DEF | MTX_DUPOK) -#define PMAP_OWNED(pmap) mtx_owned(&(pmap)->pm_mtx) -#define PMAP_MTX(pmap) (&(pmap)->pm_mtx) -#define PMAP_TRYLOCK(pmap) mtx_trylock(&(pmap)->pm_mtx) -#define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) -#endif - - -/* - * For each vm_page_t, there is a list of all currently valid virtual - * mappings of that page. An entry is a pv_entry_t, the list is pv_list. - */ -typedef struct pv_entry { - vm_offset_t pv_va; /* virtual address for mapping */ - TAILQ_ENTRY(pv_entry) pv_list; - int pv_flags; /* flags (wired, etc...) 
*/ - pmap_t pv_pmap; /* pmap where mapping lies */ - TAILQ_ENTRY(pv_entry) pv_plist; -} *pv_entry_t; - -/* - * pv_entries are allocated in chunks per-process. This avoids the - * need to track per-pmap assignments. - */ -#define _NPCM 8 -#define _NPCPV 252 - -struct pv_chunk { - pmap_t pc_pmap; - TAILQ_ENTRY(pv_chunk) pc_list; - uint32_t pc_map[_NPCM]; /* bitmap; 1 = free */ - uint32_t pc_dummy[3]; /* aligns pv_chunk to 4KB */ - TAILQ_ENTRY(pv_chunk) pc_lru; - struct pv_entry pc_pventry[_NPCPV]; -}; - -#ifdef _KERNEL - -boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); - -/* - * virtual address to page table entry and - * to physical address. Likewise for alternate address space. - * Note: these work recursively, thus vtopte of a pte will give - * the corresponding pde that in turn maps it. - */ - -/* - * The current top of kernel VM. - */ -extern vm_offset_t pmap_curmaxkvaddr; - -struct pcb; - -void pmap_set_pcb_pagedir(pmap_t, struct pcb *); -/* Virtual address to page table entry */ -static __inline pt_entry_t * -vtopte(vm_offset_t va) -{ - pd_entry_t *pdep; - pt_entry_t *ptep; - - if (pmap_get_pde_pte(kernel_pmap, va, &pdep, &ptep) == FALSE) - return (NULL); - return (ptep); -} - -extern vm_paddr_t phys_avail[]; -extern vm_offset_t virtual_avail; -extern vm_offset_t virtual_end; - -void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); -int pmap_change_attr(vm_offset_t, vm_size_t, int); -void pmap_kenter(vm_offset_t va, vm_paddr_t pa); -void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); -void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); -void pmap_kremove_device(vm_offset_t, vm_size_t); -void *pmap_kenter_temporary(vm_paddr_t pa, int i); -void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); -vm_paddr_t pmap_kextract(vm_offset_t va); -vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); -void pmap_kremove(vm_offset_t); -void *pmap_mapdev(vm_offset_t, vm_size_t); -void pmap_unmapdev(vm_offset_t, 
vm_size_t); -vm_page_t pmap_use_pt(pmap_t, vm_offset_t); -void pmap_debug(int); -void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); -void pmap_link_l2pt(vm_offset_t, vm_offset_t, struct pv_addr *); -vm_size_t pmap_map_chunk(vm_offset_t, vm_offset_t, vm_offset_t, vm_size_t, int, int); -void -pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, - int cache); -int pmap_fault_fixup(pmap_t, vm_offset_t, vm_prot_t, int); - -/* - * Definitions for MMU domains - */ -#define PMAP_DOMAINS 15 /* 15 'user' domains (1-15) */ -#define PMAP_DOMAIN_KERNEL 0 /* The kernel uses domain #0 */ - -/* - * The new pmap ensures that page-tables are always mapping Write-Thru. - * Thus, on some platforms we can run fast and loose and avoid syncing PTEs - * on every change. - * - * Unfortunately, not all CPUs have a write-through cache mode. So we - * define PMAP_NEEDS_PTE_SYNC for C code to conditionally do PTE syncs, - * and if there is the chance for PTE syncs to be needed, we define - * PMAP_INCLUDE_PTE_SYNC so e.g. assembly code can include (and run) - * the code. - */ -extern int pmap_needs_pte_sync; - -/* - * These macros define the various bit masks in the PTE. - * - * We use these macros since we use different bits on different processor - * models. 
- */ - -#define L1_S_CACHE_MASK_generic (L1_S_B|L1_S_C) -#define L1_S_CACHE_MASK_xscale (L1_S_B|L1_S_C|L1_S_XSCALE_TEX(TEX_XSCALE_X)|\ - L1_S_XSCALE_TEX(TEX_XSCALE_T)) - -#define L2_L_CACHE_MASK_generic (L2_B|L2_C) -#define L2_L_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_L_TEX(TEX_XSCALE_X) | \ - L2_XSCALE_L_TEX(TEX_XSCALE_T)) - -#define L2_S_PROT_U_generic (L2_AP(AP_U)) -#define L2_S_PROT_W_generic (L2_AP(AP_W)) -#define L2_S_PROT_MASK_generic (L2_S_PROT_U|L2_S_PROT_W) - -#define L2_S_PROT_U_xscale (L2_AP0(AP_U)) -#define L2_S_PROT_W_xscale (L2_AP0(AP_W)) -#define L2_S_PROT_MASK_xscale (L2_S_PROT_U|L2_S_PROT_W) - -#define L2_S_CACHE_MASK_generic (L2_B|L2_C) -#define L2_S_CACHE_MASK_xscale (L2_B|L2_C|L2_XSCALE_T_TEX(TEX_XSCALE_X)| \ - L2_XSCALE_T_TEX(TEX_XSCALE_X)) - -#define L1_S_PROTO_generic (L1_TYPE_S | L1_S_IMP) -#define L1_S_PROTO_xscale (L1_TYPE_S) +#include -#define L1_C_PROTO_generic (L1_TYPE_C | L1_C_IMP2) -#define L1_C_PROTO_xscale (L1_TYPE_C) - -#define L2_L_PROTO (L2_TYPE_L) - -#define L2_S_PROTO_generic (L2_TYPE_S) -#define L2_S_PROTO_xscale (L2_TYPE_XSCALE_XS) - -/* - * User-visible names for the ones that vary with MMU class. - */ -#define L2_AP(x) (L2_AP0(x) | L2_AP1(x) | L2_AP2(x) | L2_AP3(x)) - -#if ARM_NMMUS > 1 -/* More than one MMU class configured; use variables. 
*/ -#define L2_S_PROT_U pte_l2_s_prot_u -#define L2_S_PROT_W pte_l2_s_prot_w -#define L2_S_PROT_MASK pte_l2_s_prot_mask - -#define L1_S_CACHE_MASK pte_l1_s_cache_mask -#define L2_L_CACHE_MASK pte_l2_l_cache_mask -#define L2_S_CACHE_MASK pte_l2_s_cache_mask - -#define L1_S_PROTO pte_l1_s_proto -#define L1_C_PROTO pte_l1_c_proto -#define L2_S_PROTO pte_l2_s_proto - -#elif ARM_MMU_GENERIC != 0 -#define L2_S_PROT_U L2_S_PROT_U_generic -#define L2_S_PROT_W L2_S_PROT_W_generic -#define L2_S_PROT_MASK L2_S_PROT_MASK_generic - -#define L1_S_CACHE_MASK L1_S_CACHE_MASK_generic -#define L2_L_CACHE_MASK L2_L_CACHE_MASK_generic -#define L2_S_CACHE_MASK L2_S_CACHE_MASK_generic - -#define L1_S_PROTO L1_S_PROTO_generic -#define L1_C_PROTO L1_C_PROTO_generic -#define L2_S_PROTO L2_S_PROTO_generic - -#elif ARM_MMU_XSCALE == 1 -#define L2_S_PROT_U L2_S_PROT_U_xscale -#define L2_S_PROT_W L2_S_PROT_W_xscale -#define L2_S_PROT_MASK L2_S_PROT_MASK_xscale - -#define L1_S_CACHE_MASK L1_S_CACHE_MASK_xscale -#define L2_L_CACHE_MASK L2_L_CACHE_MASK_xscale -#define L2_S_CACHE_MASK L2_S_CACHE_MASK_xscale - -#define L1_S_PROTO L1_S_PROTO_xscale -#define L1_C_PROTO L1_C_PROTO_xscale -#define L2_S_PROTO L2_S_PROTO_xscale - -#endif /* ARM_NMMUS > 1 */ - -#if defined(CPU_XSCALE_81342) -#define PMAP_NEEDS_PTE_SYNC 1 -#define PMAP_INCLUDE_PTE_SYNC -#else -#define PMAP_NEEDS_PTE_SYNC 0 -#endif - -/* - * These macros return various bits based on kernel/user and protection. - * Note that the compiler will usually fold these at compile time. - */ -#define L1_S_PROT_U (L1_S_AP(AP_U)) -#define L1_S_PROT_W (L1_S_AP(AP_W)) -#define L1_S_PROT_MASK (L1_S_PROT_U|L1_S_PROT_W) -#define L1_S_WRITABLE(pd) ((pd) & L1_S_PROT_W) - -#define L1_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L1_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L1_S_PROT_W : 0)) - -#define L2_L_PROT_U (L2_AP(AP_U)) -#define L2_L_PROT_W (L2_AP(AP_W)) -#define L2_L_PROT_MASK (L2_L_PROT_U|L2_L_PROT_W) - -#define L2_L_PROT(ku, pr) ((((ku) == PTE_USER) ? 
L2_L_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_L_PROT_W : 0)) - -#define L2_S_PROT(ku, pr) ((((ku) == PTE_USER) ? L2_S_PROT_U : 0) | \ - (((pr) & VM_PROT_WRITE) ? L2_S_PROT_W : 0)) - -/* - * Macros to test if a mapping is mappable with an L1 Section mapping - * or an L2 Large Page mapping. - */ -#define L1_S_MAPPABLE_P(va, pa, size) \ - ((((va) | (pa)) & L1_S_OFFSET) == 0 && (size) >= L1_S_SIZE) - -#define L2_L_MAPPABLE_P(va, pa, size) \ - ((((va) | (pa)) & L2_L_OFFSET) == 0 && (size) >= L2_L_SIZE) - -/* - * Provide a fallback in case we were not able to determine it at - * compile-time. - */ -#ifndef PMAP_NEEDS_PTE_SYNC -#define PMAP_NEEDS_PTE_SYNC pmap_needs_pte_sync -#define PMAP_INCLUDE_PTE_SYNC -#endif - -#ifdef ARM_L2_PIPT -#define _sync_l2(pte, size) cpu_l2cache_wb_range(vtophys(pte), size) +#if __ARM_ARCH >= 6 +#include #else -#define _sync_l2(pte, size) cpu_l2cache_wb_range(pte, size) +#include #endif -#define PTE_SYNC(pte) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC) { \ - cpu_dcache_wb_range((vm_offset_t)(pte), sizeof(pt_entry_t));\ - cpu_drain_writebuf(); \ - _sync_l2((vm_offset_t)(pte), sizeof(pt_entry_t));\ - } else \ - cpu_drain_writebuf(); \ -} while (/*CONSTCOND*/0) - -#define PTE_SYNC_RANGE(pte, cnt) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC) { \ - cpu_dcache_wb_range((vm_offset_t)(pte), \ - (cnt) << 2); /* * sizeof(pt_entry_t) */ \ - cpu_drain_writebuf(); \ - _sync_l2((vm_offset_t)(pte), \ - (cnt) << 2); /* * sizeof(pt_entry_t) */ \ - } else \ - cpu_drain_writebuf(); \ -} while (/*CONSTCOND*/0) - -extern pt_entry_t pte_l1_s_cache_mode; -extern pt_entry_t pte_l1_s_cache_mask; - -extern pt_entry_t pte_l2_l_cache_mode; -extern pt_entry_t pte_l2_l_cache_mask; - -extern pt_entry_t pte_l2_s_cache_mode; -extern pt_entry_t pte_l2_s_cache_mask; - -extern pt_entry_t pte_l1_s_cache_mode_pt; -extern pt_entry_t pte_l2_l_cache_mode_pt; -extern pt_entry_t pte_l2_s_cache_mode_pt; - -extern pt_entry_t pte_l2_s_prot_u; -extern pt_entry_t pte_l2_s_prot_w; -extern pt_entry_t 
pte_l2_s_prot_mask; - -extern pt_entry_t pte_l1_s_proto; -extern pt_entry_t pte_l1_c_proto; -extern pt_entry_t pte_l2_s_proto; - -extern void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); -extern void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, - vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -extern void (*pmap_zero_page_func)(vm_paddr_t, int, int); - -#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_81342) -void pmap_copy_page_generic(vm_paddr_t, vm_paddr_t); -void pmap_zero_page_generic(vm_paddr_t, int, int); - -void pmap_pte_init_generic(void); -#endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -void pmap_copy_page_xscale(vm_paddr_t, vm_paddr_t); -void pmap_zero_page_xscale(vm_paddr_t, int, int); - -void pmap_pte_init_xscale(void); - -void xscale_setup_minidata(vm_offset_t, vm_offset_t, vm_offset_t); - -void pmap_use_minicache(vm_offset_t, vm_size_t); -#endif /* ARM_MMU_XSCALE == 1 */ -#if defined(CPU_XSCALE_81342) -#define ARM_HAVE_SUPERSECTIONS -#endif - -#define PTE_KERNEL 0 -#define PTE_USER 1 -#define l1pte_valid(pde) ((pde) != 0) -#define l1pte_section_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_S) -#define l1pte_page_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_C) -#define l1pte_fpage_p(pde) (((pde) & L1_TYPE_MASK) == L1_TYPE_F) - -#define l2pte_index(v) (((v) & L1_S_OFFSET) >> L2_S_SHIFT) -#define l2pte_valid(pte) ((pte) != 0) -#define l2pte_pa(pte) ((pte) & L2_S_FRAME) -#define l2pte_minidata(pte) (((pte) & \ - (L2_B | L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X)))\ - == (L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X))) - -/* L1 and L2 page table macros */ -#define pmap_pde_v(pde) l1pte_valid(*(pde)) -#define pmap_pde_section(pde) l1pte_section_p(*(pde)) -#define pmap_pde_page(pde) l1pte_page_p(*(pde)) -#define pmap_pde_fpage(pde) l1pte_fpage_p(*(pde)) - -#define pmap_pte_v(pte) l2pte_valid(*(pte)) -#define pmap_pte_pa(pte) l2pte_pa(*(pte)) - -/* - * Flags that indicate attributes of pages or mappings of pages. 
- * - * The PVF_MOD and PVF_REF flags are stored in the mdpage for each - * page. PVF_WIRED, PVF_WRITE, and PVF_NC are kept in individual - * pv_entry's for each page. They live in the same "namespace" so - * that we can clear multiple attributes at a time. - * - * Note the "non-cacheable" flag generally means the page has - * multiple mappings in a given address space. - */ -#define PVF_MOD 0x01 /* page is modified */ -#define PVF_REF 0x02 /* page is referenced */ -#define PVF_WIRED 0x04 /* mapping is wired */ -#define PVF_WRITE 0x08 /* mapping is writable */ -#define PVF_EXEC 0x10 /* mapping is executable */ -#define PVF_NC 0x20 /* mapping is non-cacheable */ -#define PVF_MWC 0x40 /* mapping is used multiple times in userland */ -#define PVF_UNMAN 0x80 /* mapping is unmanaged */ - -void vector_page_setprot(int); - -#define SECTION_CACHE 0x1 -#define SECTION_PT 0x2 -void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); -#ifdef ARM_HAVE_SUPERSECTIONS -void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); -#endif - -extern char *_tmppt; - -void pmap_postinit(void); - -extern vm_paddr_t dump_avail[]; -#endif /* _KERNEL */ - -#endif /* !LOCORE */ - #endif /* !_MACHINE_PMAP_H_ */ -#endif /* __ARM_ARCH >= 6 */ -- cgit v1.2.3 From 12fdcefc8db09e839c03e4654841c96be9c3877f Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 08:41:47 +0000 Subject: Move common definitions from both pmap-v4.h and pmap-v6.h into pmap.h. (1) MI interface needed for vm subsystem. (2) MD interface created for ARM architecture to be used in files shared by armv4 and armv6 platforms. 
--- sys/arm/include/pmap-v4.h | 25 ------------------------- sys/arm/include/pmap-v6.h | 17 ----------------- sys/arm/include/pmap.h | 27 +++++++++++++++++++++++++++ 3 files changed, 27 insertions(+), 42 deletions(-) diff --git a/sys/arm/include/pmap-v4.h b/sys/arm/include/pmap-v4.h index 5b7f3a1031c6..bfa6be49afe1 100644 --- a/sys/arm/include/pmap-v4.h +++ b/sys/arm/include/pmap-v4.h @@ -80,16 +80,8 @@ enum mem_type { #define PDESIZE sizeof(pd_entry_t) /* for assembly files */ #define PTESIZE sizeof(pt_entry_t) /* for assembly files */ -#ifdef _KERNEL - -#define vtophys(va) pmap_kextract((vm_offset_t)(va)) - -#endif - #define pmap_page_get_memattr(m) ((m)->md.pv_memattr) -#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) #define pmap_page_is_mapped(m) (!TAILQ_EMPTY(&(m)->md.pv_list)) -void pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma); /* * Pmap stuff @@ -163,7 +155,6 @@ extern struct pmap kernel_pmap_store; #define PMAP_UNLOCK(pmap) mtx_unlock(&(pmap)->pm_mtx) #endif - /* * For each vm_page_t, there is a list of all currently valid virtual * mappings of that page. An entry is a pv_entry_t, the list is pv_list. 
@@ -208,9 +199,6 @@ boolean_t pmap_get_pde_pte(pmap_t, vm_offset_t, pd_entry_t **, pt_entry_t **); */ extern vm_offset_t pmap_curmaxkvaddr; -struct pcb; - -void pmap_set_pcb_pagedir(pmap_t, struct pcb *); /* Virtual address to page table entry */ static __inline pt_entry_t * vtopte(vm_offset_t va) @@ -223,23 +211,13 @@ vtopte(vm_offset_t va) return (ptep); } -extern vm_paddr_t phys_avail[]; -extern vm_offset_t virtual_avail; -extern vm_offset_t virtual_end; - void pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt); int pmap_change_attr(vm_offset_t, vm_size_t, int); void pmap_kenter(vm_offset_t va, vm_paddr_t pa); void pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa); -void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); -void pmap_kremove_device(vm_offset_t, vm_size_t); -void *pmap_kenter_temporary(vm_paddr_t pa, int i); void pmap_kenter_user(vm_offset_t va, vm_paddr_t pa); -vm_paddr_t pmap_kextract(vm_offset_t va); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); void pmap_kremove(vm_offset_t); -void *pmap_mapdev(vm_offset_t, vm_size_t); -void pmap_unmapdev(vm_offset_t, vm_size_t); vm_page_t pmap_use_pt(pmap_t, vm_offset_t); void pmap_debug(int); void pmap_map_section(vm_offset_t, vm_offset_t, vm_offset_t, int, int); @@ -529,11 +507,8 @@ void pmap_kenter_section(vm_offset_t, vm_paddr_t, int flags); void pmap_kenter_supersection(vm_offset_t, uint64_t, int flags); #endif -extern char *_tmppt; - void pmap_postinit(void); -extern vm_paddr_t dump_avail[]; #endif /* _KERNEL */ #endif /* !LOCORE */ diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h index bfc6860db0b1..c771e2276a46 100644 --- a/sys/arm/include/pmap-v6.h +++ b/sys/arm/include/pmap-v6.h @@ -89,12 +89,6 @@ typedef uint32_t ttb_entry_t; /* TTB entry */ #define NKPT2PG 32 #endif -extern vm_paddr_t phys_avail[]; -extern vm_paddr_t dump_avail[]; -extern char *_tmppt; /* poor name! 
*/ -extern vm_offset_t virtual_avail; -extern vm_offset_t virtual_end; - /* * Pmap stuff */ @@ -170,11 +164,9 @@ struct pv_chunk { }; #ifdef _KERNEL -struct pcb; extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */ #define pmap_page_get_memattr(m) ((m)->md.pat_mode) -#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) /* * Only the following functions or macros may be used before pmap_bootstrap() @@ -183,27 +175,18 @@ extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */ */ void pmap_bootstrap(vm_offset_t ); void pmap_kenter(vm_offset_t , vm_paddr_t ); -void *pmap_kenter_temporary(vm_paddr_t , int ); void pmap_kremove(vm_offset_t); -void *pmap_mapdev(vm_paddr_t, vm_size_t); void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); boolean_t pmap_page_is_mapped(vm_page_t ); -void pmap_page_set_memattr(vm_page_t , vm_memattr_t ); -void pmap_unmapdev(vm_offset_t, vm_size_t); -void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); -void pmap_kremove_device(vm_offset_t, vm_size_t); -void pmap_set_pcb_pagedir(pmap_t , struct pcb *); void pmap_tlb_flush(pmap_t , vm_offset_t ); void pmap_tlb_flush_range(pmap_t , vm_offset_t , vm_size_t ); void pmap_dcache_wb_range(vm_paddr_t , vm_size_t , vm_memattr_t ); -vm_paddr_t pmap_kextract(vm_offset_t ); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); int pmap_fault(pmap_t , vm_offset_t , uint32_t , int , bool); -#define vtophys(va) pmap_kextract((vm_offset_t)(va)) void pmap_set_tex(void); void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set); diff --git a/sys/arm/include/pmap.h b/sys/arm/include/pmap.h index ec69077b15ba..777216fd45db 100644 --- a/sys/arm/include/pmap.h +++ b/sys/arm/include/pmap.h @@ -38,4 +38,31 @@ #include #endif +#ifdef _KERNEL + +extern vm_paddr_t dump_avail[]; +extern vm_paddr_t phys_avail[]; + +extern char *_tmppt; /* poor name! 
*/ + +extern vm_offset_t virtual_avail; +extern vm_offset_t virtual_end; + +void *pmap_kenter_temporary(vm_paddr_t, int); +#define pmap_page_is_write_mapped(m) (((m)->aflags & PGA_WRITEABLE) != 0) +void pmap_page_set_memattr(vm_page_t, vm_memattr_t); + +void *pmap_mapdev(vm_paddr_t, vm_size_t); +void pmap_unmapdev(vm_offset_t, vm_size_t); + +struct pcb; +void pmap_set_pcb_pagedir(pmap_t, struct pcb *); + +void pmap_kenter_device(vm_offset_t, vm_size_t, vm_paddr_t); +void pmap_kremove_device(vm_offset_t, vm_size_t); + +vm_paddr_t pmap_kextract(vm_offset_t); +#define vtophys(va) pmap_kextract((vm_offset_t)(va)) + +#endif /* _KERNEL */ #endif /* !_MACHINE_PMAP_H_ */ -- cgit v1.2.3 From 2919c53c63423f45b02e8cd7a27c0ea44683021a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Fri, 19 Feb 2016 08:42:13 +0000 Subject: Remove O_SYNC from the options passed to dbmopen(). The output file is created as a temporary file that is moved over the existing file after completion. Thus there is no need to immediately flush all created db records to the temporary file. This speeds up creation of the termcap db by a factor of 40 on my ZFS based /etc filesytem (from 25 seconds to 0.6 seconds). I have compared multiple output files created with and without O_SYNC and they came out identical each time. Nonetheless it might be best to MFC this change and the similar one for services_mkdb (r295465) at the same time when the changes to hash.c in review D5186 are merged. MFC: 1 week --- usr.bin/cap_mkdb/cap_mkdb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/usr.bin/cap_mkdb/cap_mkdb.c b/usr.bin/cap_mkdb/cap_mkdb.c index bbcedd5a8d54..2f8bd96f16af 100644 --- a/usr.bin/cap_mkdb/cap_mkdb.c +++ b/usr.bin/cap_mkdb/cap_mkdb.c @@ -119,7 +119,7 @@ main(int argc, char *argv[]) (void)snprintf(buf, sizeof(buf), "%s.db", capname ? 
capname : *argv); if ((capname = strdup(buf)) == NULL) errx(1, "strdup failed"); - if ((capdbp = dbopen(capname, O_CREAT | O_TRUNC | O_RDWR | O_SYNC, + if ((capdbp = dbopen(capname, O_CREAT | O_TRUNC | O_RDWR, DEFFILEMODE, DB_HASH, &openinfo)) == NULL) err(1, "%s", buf); -- cgit v1.2.3 From 08674c45738549a742537ff5be60639aac784608 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 09:23:32 +0000 Subject: Rename pte.h to pte-v4.h and start including directly either pte-v4.h or pte-v6.h in files which needs it. There are quite internal definitions in pte-v4.h and pte-v6.h headers specific for corresponding pmap implementation. These headers should be included only in very few files and an intention is to not hide for which implementation such files are. Further, sys/arm/arm/elf_trampoline.c is an example of file which uses armv4 like pmap implementation for both armv4 and armv6 platforms. This is another reason why pte.h which includes specific header according to __ARM_ARCH is not created. 
--- lib/libkvm/kvm_arm.h | 10 +- sys/arm/arm/elf_trampoline.c | 2 +- sys/arm/arm/locore-v4.S | 2 +- sys/arm/arm/locore-v6.S | 2 +- sys/arm/include/pmap-v4.h | 2 +- sys/arm/include/pte-v4.h | 350 ++++++++++++++++++++++++++++++++++++++++++ sys/arm/include/pte-v6.h | 6 +- sys/arm/include/pte.h | 356 ------------------------------------------- 8 files changed, 363 insertions(+), 367 deletions(-) create mode 100644 sys/arm/include/pte-v4.h delete mode 100644 sys/arm/include/pte.h diff --git a/lib/libkvm/kvm_arm.h b/lib/libkvm/kvm_arm.h index 404d63a54293..38d05cd1441a 100644 --- a/lib/libkvm/kvm_arm.h +++ b/lib/libkvm/kvm_arm.h @@ -29,10 +29,6 @@ #ifndef __KVM_ARM_H__ #define __KVM_ARM_H__ -#ifdef __arm__ -#include -#endif - typedef uint32_t arm_physaddr_t; typedef uint32_t arm_pd_entry_t; typedef uint32_t arm_pt_entry_t; @@ -75,6 +71,12 @@ typedef uint32_t arm_pt_entry_t; #ifdef __arm__ #include +#if __ARM_ARCH >= 6 +#include +#else +#include +#endif + _Static_assert(PAGE_SHIFT == ARM_PAGE_SHIFT, "PAGE_SHIFT mismatch"); _Static_assert(PAGE_SIZE == ARM_PAGE_SIZE, "PAGE_SIZE mismatch"); _Static_assert(PAGE_MASK == ARM_PAGE_MASK, "PAGE_MASK mismatch"); diff --git a/sys/arm/arm/elf_trampoline.c b/sys/arm/arm/elf_trampoline.c index 22f0f44ae575..42c4514ed845 100644 --- a/sys/arm/arm/elf_trampoline.c +++ b/sys/arm/arm/elf_trampoline.c @@ -36,7 +36,7 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include +#include #include #include diff --git a/sys/arm/arm/locore-v4.S b/sys/arm/arm/locore-v4.S index c46c0bff16d6..17210c04d340 100644 --- a/sys/arm/arm/locore-v4.S +++ b/sys/arm/arm/locore-v4.S @@ -38,7 +38,7 @@ #include #include #include -#include +#include __FBSDID("$FreeBSD$"); diff --git a/sys/arm/arm/locore-v6.S b/sys/arm/arm/locore-v6.S index 959dcc65b598..5152842ab41e 100644 --- a/sys/arm/arm/locore-v6.S +++ b/sys/arm/arm/locore-v6.S @@ -36,7 +36,7 @@ #include #include #include -#include +#include __FBSDID("$FreeBSD$"); diff --git a/sys/arm/include/pmap-v4.h 
b/sys/arm/include/pmap-v4.h index bfa6be49afe1..cfb675e00ac4 100644 --- a/sys/arm/include/pmap-v4.h +++ b/sys/arm/include/pmap-v4.h @@ -50,7 +50,7 @@ #ifndef _MACHINE_PMAP_V4_H_ #define _MACHINE_PMAP_V4_H_ -#include +#include #include /* * Pte related macros diff --git a/sys/arm/include/pte-v4.h b/sys/arm/include/pte-v4.h new file mode 100644 index 000000000000..7102902c18f7 --- /dev/null +++ b/sys/arm/include/pte-v4.h @@ -0,0 +1,350 @@ +/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ + +/*- + * Copyright (c) 1994 Mark Brinicombe. + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by the RiscBSD team. + * 4. The name "RiscBSD" nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
+ * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * $FreeBSD$ + */ + +#ifndef _MACHINE_PTE_V4_H_ +#define _MACHINE_PTE_V4_H_ + +#ifndef LOCORE +typedef uint32_t pd_entry_t; /* page directory entry */ +typedef uint32_t pt_entry_t; /* page table entry */ +typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ +#endif + +#define PG_FRAME 0xfffff000 + +/* The PT_SIZE definition is misleading... A page table is only 0x400 + * bytes long. But since VM mapping can only be done to 0x1000 a single + * 1KB blocks cannot be steered to a va by itself. Therefore the + * pages tables are allocated in blocks of 4. i.e. if a 1 KB block + * was allocated for a PT then the other 3KB would also get mapped + * whenever the 1KB was mapped. + */ + +#define PT_RSIZE 0x0400 /* Real page table size */ +#define PT_SIZE 0x1000 +#define PD_SIZE 0x4000 + +/* Page table types and masks */ +#define L1_PAGE 0x01 /* L1 page table mapping */ +#define L1_SECTION 0x02 /* L1 section mapping */ +#define L1_FPAGE 0x03 /* L1 fine page mapping */ +#define L1_MASK 0x03 /* Mask for L1 entry type */ +#define L2_LPAGE 0x01 /* L2 large page (64KB) */ +#define L2_SPAGE 0x02 /* L2 small page (4KB) */ +#define L2_MASK 0x03 /* Mask for L2 entry type */ +#define L2_INVAL 0x00 /* L2 invalid type */ + +/* + * The ARM MMU architecture was introduced with ARM v3 (previous ARM + * architecture versions used an optional off-CPU memory controller + * to perform address translation). 
+ * + * The ARM MMU consists of a TLB and translation table walking logic. + * There is typically one TLB per memory interface (or, put another + * way, one TLB per software-visible cache). + * + * The ARM MMU is capable of mapping memory in the following chunks: + * + * 1M Sections (L1 table) + * + * 64K Large Pages (L2 table) + * + * 4K Small Pages (L2 table) + * + * 1K Tiny Pages (L2 table) + * + * There are two types of L2 tables: Coarse Tables and Fine Tables. + * Coarse Tables can map Large and Small Pages. Fine Tables can + * map Tiny Pages. + * + * Coarse Tables can define 4 Subpages within Large and Small pages. + * Subpages define different permissions for each Subpage within + * a Page. + * + * Coarse Tables are 1K in length. Fine tables are 4K in length. + * + * The Translation Table Base register holds the pointer to the + * L1 Table. The L1 Table is a 16K contiguous chunk of memory + * aligned to a 16K boundary. Each entry in the L1 Table maps + * 1M of virtual address space, either via a Section mapping or + * via an L2 Table. + * + * In addition, the Fast Context Switching Extension (FCSE) is available + * on some ARM v4 and ARM v5 processors. FCSE is a way of eliminating + * TLB/cache flushes on context switch by use of a smaller address space + * and a "process ID" that modifies the virtual address before being + * presented to the translation logic. + */ + +/* ARMv6 super-sections. 
*/ +#define L1_SUP_SIZE 0x01000000 /* 16M */ +#define L1_SUP_OFFSET (L1_SUP_SIZE - 1) +#define L1_SUP_FRAME (~L1_SUP_OFFSET) +#define L1_SUP_SHIFT 24 + +#define L1_S_SIZE 0x00100000 /* 1M */ +#define L1_S_OFFSET (L1_S_SIZE - 1) +#define L1_S_FRAME (~L1_S_OFFSET) +#define L1_S_SHIFT 20 + +#define L2_L_SIZE 0x00010000 /* 64K */ +#define L2_L_OFFSET (L2_L_SIZE - 1) +#define L2_L_FRAME (~L2_L_OFFSET) +#define L2_L_SHIFT 16 + +#define L2_S_SIZE 0x00001000 /* 4K */ +#define L2_S_OFFSET (L2_S_SIZE - 1) +#define L2_S_FRAME (~L2_S_OFFSET) +#define L2_S_SHIFT 12 + +#define L2_T_SIZE 0x00000400 /* 1K */ +#define L2_T_OFFSET (L2_T_SIZE - 1) +#define L2_T_FRAME (~L2_T_OFFSET) +#define L2_T_SHIFT 10 + +/* + * The NetBSD VM implementation only works on whole pages (4K), + * whereas the ARM MMU's Coarse tables are sized in terms of 1K + * (16K L1 table, 1K L2 table). + * + * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 + * table. + */ +#define L1_TABLE_SIZE 0x4000 /* 16K */ +#define L2_TABLE_SIZE 0x1000 /* 4K */ +/* + * The new pmap deals with the 1KB coarse L2 tables by + * allocating them from a pool. Until every port has been converted, + * keep the old L2_TABLE_SIZE define lying around. Converted ports + * should use L2_TABLE_SIZE_REAL until then. 
+ */ +#define L2_TABLE_SIZE_REAL 0x400 /* 1K */ + +/* Total number of page table entries in L2 table */ +#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) + +/* + * ARM L1 Descriptors + */ + +#define L1_TYPE_INV 0x00 /* Invalid (fault) */ +#define L1_TYPE_C 0x01 /* Coarse L2 */ +#define L1_TYPE_S 0x02 /* Section */ +#define L1_TYPE_F 0x03 /* Fine L2 */ +#define L1_TYPE_MASK 0x03 /* mask of type bits */ + +/* L1 Section Descriptor */ +#define L1_S_B 0x00000004 /* bufferable Section */ +#define L1_S_C 0x00000008 /* cacheable Section */ +#define L1_S_IMP 0x00000010 /* implementation defined */ +#define L1_S_XN (1 << 4) /* execute not */ +#define L1_S_DOM(x) ((x) << 5) /* domain */ +#define L1_S_DOM_MASK L1_S_DOM(0xf) +#define L1_S_AP(x) ((x) << 10) /* access permissions */ +#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ +#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ +#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ +#define L1_S_APX (1 << 15) +#define L1_SHARED (1 << 16) + +#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ +#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ + +#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
*/ + +/* L1 Coarse Descriptor */ +#define L1_C_IMP0 0x00000004 /* implementation defined */ +#define L1_C_IMP1 0x00000008 /* implementation defined */ +#define L1_C_IMP2 0x00000010 /* implementation defined */ +#define L1_C_DOM(x) ((x) << 5) /* domain */ +#define L1_C_DOM_MASK L1_C_DOM(0xf) +#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ + +#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ + +/* L1 Fine Descriptor */ +#define L1_F_IMP0 0x00000004 /* implementation defined */ +#define L1_F_IMP1 0x00000008 /* implementation defined */ +#define L1_F_IMP2 0x00000010 /* implementation defined */ +#define L1_F_DOM(x) ((x) << 5) /* domain */ +#define L1_F_DOM_MASK L1_F_DOM(0xf) +#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ + +#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ + +/* + * ARM L2 Descriptors + */ + +#define L2_TYPE_INV 0x00 /* Invalid (fault) */ +#define L2_TYPE_L 0x01 /* Large Page */ +#define L2_TYPE_S 0x02 /* Small Page */ +#define L2_TYPE_T 0x03 /* Tiny Page */ +#define L2_TYPE_MASK 0x03 /* mask of type bits */ + + /* + * This L2 Descriptor type is available on XScale processors + * when using a Coarse L1 Descriptor. The Extended Small + * Descriptor has the same format as the XScale Tiny Descriptor, + * but describes a 4K page, rather than a 1K page. 
+ */ +#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ + +#define L2_B 0x00000004 /* Bufferable page */ +#define L2_C 0x00000008 /* Cacheable page */ +#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ +#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ +#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ +#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ + +#define L2_SHARED (1 << 10) +#define L2_APX (1 << 9) +#define L2_XN (1 << 0) +#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ +#define L2_L_TEX(x) (((x) & 0x7) << 12) +#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ +#define L2_S_TEX(x) (((x) & 0x7) << 6) + +#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ +#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ +#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ + +/* + * Access Permissions for L1 and L2 Descriptors. + */ +#define AP_W 0x01 /* writable */ +#define AP_REF 0x01 /* referenced flag */ +#define AP_U 0x02 /* user */ + +/* + * Short-hand for common AP_* constants. + * + * Note: These values assume the S (System) bit is set and + * the R (ROM) bit is clear in CP15 register 1. + */ +#define AP_KR 0x00 /* kernel read */ +#define AP_KRW 0x01 /* kernel read/write */ +#define AP_KRWUR 0x02 /* kernel read/write usr read */ +#define AP_KRWURW 0x03 /* kernel read/write usr read/write */ + +/* + * Domain Types for the Domain Access Control Register. + */ +#define DOMAIN_FAULT 0x00 /* no access */ +#define DOMAIN_CLIENT 0x01 /* client */ +#define DOMAIN_RESERVED 0x02 /* reserved */ +#define DOMAIN_MANAGER 0x03 /* manager */ + +/* + * Type Extension bits for XScale processors. 
+ * + * Behavior of C and B when X == 0: + * + * C B Cacheable Bufferable Write Policy Line Allocate Policy + * 0 0 N N - - + * 0 1 N Y - - + * 1 0 Y Y Write-through Read Allocate + * 1 1 Y Y Write-back Read Allocate + * + * Behavior of C and B when X == 1: + * C B Cacheable Bufferable Write Policy Line Allocate Policy + * 0 0 - - - - DO NOT USE + * 0 1 N Y - - + * 1 0 Mini-Data - - - + * 1 1 Y Y Write-back R/W Allocate + */ +#define TEX_XSCALE_X 0x01 /* X modifies C and B */ +#define TEX_XSCALE_E 0x02 +#define TEX_XSCALE_T 0x04 + +/* Xscale core 3 */ + +/* + * + * Cache attributes with L2 present, S = 0 + * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce + * 0 0 0 0 0 N N - N N + * 0 0 0 0 1 N N - N Y + * 0 0 0 1 0 Y Y WT N Y + * 0 0 0 1 1 Y Y WB Y Y + * 0 0 1 0 0 N N - Y Y + * 0 0 1 0 1 N N - N N + * 0 0 1 1 0 Y Y - - N + * 0 0 1 1 1 Y Y WT Y Y + * 0 1 0 0 0 N N - N N + * 0 1 0 0 1 N/A N/A N/A N/A N/A + * 0 1 0 1 0 N/A N/A N/A N/A N/A + * 0 1 0 1 1 N/A N/A N/A N/A N/A + * 0 1 1 X X N/A N/A N/A N/A N/A + * 1 X 0 0 0 N N - N Y + * 1 X 0 0 1 Y N WB N Y + * 1 X 0 1 0 Y N WT N Y + * 1 X 0 1 1 Y N WB Y Y + * 1 X 1 0 0 N N - Y Y + * 1 X 1 0 1 Y Y WB Y Y + * 1 X 1 1 0 Y Y WT Y Y + * 1 X 1 1 1 Y Y WB Y Y + * + * + * + * + * Cache attributes with L2 present, S = 1 + * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce + * 0 0 0 0 0 N N - N N + * 0 0 0 0 1 N N - N Y + * 0 0 0 1 0 Y Y - N Y + * 0 0 0 1 1 Y Y WT Y Y + * 0 0 1 0 0 N N - Y Y + * 0 0 1 0 1 N N - N N + * 0 0 1 1 0 Y Y - - N + * 0 0 1 1 1 Y Y WT Y Y + * 0 1 0 0 0 N N - N N + * 0 1 0 0 1 N/A N/A N/A N/A N/A + * 0 1 0 1 0 N/A N/A N/A N/A N/A + * 0 1 0 1 1 N/A N/A N/A N/A N/A + * 0 1 1 X X N/A N/A N/A N/A N/A + * 1 X 0 0 0 N N - N Y + * 1 X 0 0 1 Y N - N Y + * 1 X 0 1 0 Y N - N Y + * 1 X 0 1 1 Y N - Y Y + * 1 X 1 0 0 N N - Y Y + * 1 X 1 0 1 Y Y WT Y Y + * 1 X 1 1 0 Y Y WT Y Y + * 1 X 1 1 1 Y Y WT Y Y + */ +#endif /* !_MACHINE_PTE_V4_H_ */ + +/* End of pte.h */ diff --git 
a/sys/arm/include/pte-v6.h b/sys/arm/include/pte-v6.h index 9febb79e2e1e..212e25da622f 100644 --- a/sys/arm/include/pte-v6.h +++ b/sys/arm/include/pte-v6.h @@ -27,8 +27,8 @@ * $FreeBSD$ */ -#ifndef _MACHINE_PTE_H_ -#define _MACHINE_PTE_H_ +#ifndef _MACHINE_PTE_V6_H_ +#define _MACHINE_PTE_V6_H_ /* * Domain Types for the Domain Access Control Register. @@ -298,4 +298,4 @@ // ----------------------------------------------------------------------------- -#endif /* !_MACHINE_PTE_H_ */ +#endif /* !_MACHINE_PTE_V6_H_ */ diff --git a/sys/arm/include/pte.h b/sys/arm/include/pte.h deleted file mode 100644 index c83ed2fbbc25..000000000000 --- a/sys/arm/include/pte.h +++ /dev/null @@ -1,356 +0,0 @@ -/* $NetBSD: pte.h,v 1.1 2001/11/23 17:39:04 thorpej Exp $ */ - -/*- - * Copyright (c) 1994 Mark Brinicombe. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by the RiscBSD team. - * 4. The name "RiscBSD" nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY RISCBSD ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
- * IN NO EVENT SHALL RISCBSD OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * $FreeBSD$ - */ -#include - -#if __ARM_ARCH >= 6 -#include -#else /* __ARM_ARCH >= 6 */ - -#ifndef _MACHINE_PTE_H_ -#define _MACHINE_PTE_H_ - -#ifndef LOCORE -typedef uint32_t pd_entry_t; /* page directory entry */ -typedef uint32_t pt_entry_t; /* page table entry */ -typedef pt_entry_t pt2_entry_t; /* compatibility with v6 */ -#endif - -#define PG_FRAME 0xfffff000 - -/* The PT_SIZE definition is misleading... A page table is only 0x400 - * bytes long. But since VM mapping can only be done to 0x1000 a single - * 1KB blocks cannot be steered to a va by itself. Therefore the - * pages tables are allocated in blocks of 4. i.e. if a 1 KB block - * was allocated for a PT then the other 3KB would also get mapped - * whenever the 1KB was mapped. 
- */ - -#define PT_RSIZE 0x0400 /* Real page table size */ -#define PT_SIZE 0x1000 -#define PD_SIZE 0x4000 - -/* Page table types and masks */ -#define L1_PAGE 0x01 /* L1 page table mapping */ -#define L1_SECTION 0x02 /* L1 section mapping */ -#define L1_FPAGE 0x03 /* L1 fine page mapping */ -#define L1_MASK 0x03 /* Mask for L1 entry type */ -#define L2_LPAGE 0x01 /* L2 large page (64KB) */ -#define L2_SPAGE 0x02 /* L2 small page (4KB) */ -#define L2_MASK 0x03 /* Mask for L2 entry type */ -#define L2_INVAL 0x00 /* L2 invalid type */ - -/* - * The ARM MMU architecture was introduced with ARM v3 (previous ARM - * architecture versions used an optional off-CPU memory controller - * to perform address translation). - * - * The ARM MMU consists of a TLB and translation table walking logic. - * There is typically one TLB per memory interface (or, put another - * way, one TLB per software-visible cache). - * - * The ARM MMU is capable of mapping memory in the following chunks: - * - * 1M Sections (L1 table) - * - * 64K Large Pages (L2 table) - * - * 4K Small Pages (L2 table) - * - * 1K Tiny Pages (L2 table) - * - * There are two types of L2 tables: Coarse Tables and Fine Tables. - * Coarse Tables can map Large and Small Pages. Fine Tables can - * map Tiny Pages. - * - * Coarse Tables can define 4 Subpages within Large and Small pages. - * Subpages define different permissions for each Subpage within - * a Page. - * - * Coarse Tables are 1K in length. Fine tables are 4K in length. - * - * The Translation Table Base register holds the pointer to the - * L1 Table. The L1 Table is a 16K contiguous chunk of memory - * aligned to a 16K boundary. Each entry in the L1 Table maps - * 1M of virtual address space, either via a Section mapping or - * via an L2 Table. - * - * In addition, the Fast Context Switching Extension (FCSE) is available - * on some ARM v4 and ARM v5 processors. 
FCSE is a way of eliminating - * TLB/cache flushes on context switch by use of a smaller address space - * and a "process ID" that modifies the virtual address before being - * presented to the translation logic. - */ - -/* ARMv6 super-sections. */ -#define L1_SUP_SIZE 0x01000000 /* 16M */ -#define L1_SUP_OFFSET (L1_SUP_SIZE - 1) -#define L1_SUP_FRAME (~L1_SUP_OFFSET) -#define L1_SUP_SHIFT 24 - -#define L1_S_SIZE 0x00100000 /* 1M */ -#define L1_S_OFFSET (L1_S_SIZE - 1) -#define L1_S_FRAME (~L1_S_OFFSET) -#define L1_S_SHIFT 20 - -#define L2_L_SIZE 0x00010000 /* 64K */ -#define L2_L_OFFSET (L2_L_SIZE - 1) -#define L2_L_FRAME (~L2_L_OFFSET) -#define L2_L_SHIFT 16 - -#define L2_S_SIZE 0x00001000 /* 4K */ -#define L2_S_OFFSET (L2_S_SIZE - 1) -#define L2_S_FRAME (~L2_S_OFFSET) -#define L2_S_SHIFT 12 - -#define L2_T_SIZE 0x00000400 /* 1K */ -#define L2_T_OFFSET (L2_T_SIZE - 1) -#define L2_T_FRAME (~L2_T_OFFSET) -#define L2_T_SHIFT 10 - -/* - * The NetBSD VM implementation only works on whole pages (4K), - * whereas the ARM MMU's Coarse tables are sized in terms of 1K - * (16K L1 table, 1K L2 table). - * - * So, we allocate L2 tables 4 at a time, thus yielding a 4K L2 - * table. - */ -#define L1_TABLE_SIZE 0x4000 /* 16K */ -#define L2_TABLE_SIZE 0x1000 /* 4K */ -/* - * The new pmap deals with the 1KB coarse L2 tables by - * allocating them from a pool. Until every port has been converted, - * keep the old L2_TABLE_SIZE define lying around. Converted ports - * should use L2_TABLE_SIZE_REAL until then. 
- */ -#define L2_TABLE_SIZE_REAL 0x400 /* 1K */ - -/* Total number of page table entries in L2 table */ -#define L2_PTE_NUM_TOTAL (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)) - -/* - * ARM L1 Descriptors - */ - -#define L1_TYPE_INV 0x00 /* Invalid (fault) */ -#define L1_TYPE_C 0x01 /* Coarse L2 */ -#define L1_TYPE_S 0x02 /* Section */ -#define L1_TYPE_F 0x03 /* Fine L2 */ -#define L1_TYPE_MASK 0x03 /* mask of type bits */ - -/* L1 Section Descriptor */ -#define L1_S_B 0x00000004 /* bufferable Section */ -#define L1_S_C 0x00000008 /* cacheable Section */ -#define L1_S_IMP 0x00000010 /* implementation defined */ -#define L1_S_XN (1 << 4) /* execute not */ -#define L1_S_DOM(x) ((x) << 5) /* domain */ -#define L1_S_DOM_MASK L1_S_DOM(0xf) -#define L1_S_AP(x) ((x) << 10) /* access permissions */ -#define L1_S_ADDR_MASK 0xfff00000 /* phys address of section */ -#define L1_S_TEX(x) (((x) & 0x7) << 12) /* Type Extension */ -#define L1_S_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L1_S_APX (1 << 15) -#define L1_SHARED (1 << 16) - -#define L1_S_XSCALE_P 0x00000200 /* ECC enable for this section */ -#define L1_S_XSCALE_TEX(x) ((x) << 12) /* Type Extension */ - -#define L1_S_SUPERSEC ((1) << 18) /* Section is a super-section. 
*/ - -/* L1 Coarse Descriptor */ -#define L1_C_IMP0 0x00000004 /* implementation defined */ -#define L1_C_IMP1 0x00000008 /* implementation defined */ -#define L1_C_IMP2 0x00000010 /* implementation defined */ -#define L1_C_DOM(x) ((x) << 5) /* domain */ -#define L1_C_DOM_MASK L1_C_DOM(0xf) -#define L1_C_ADDR_MASK 0xfffffc00 /* phys address of L2 Table */ - -#define L1_C_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* L1 Fine Descriptor */ -#define L1_F_IMP0 0x00000004 /* implementation defined */ -#define L1_F_IMP1 0x00000008 /* implementation defined */ -#define L1_F_IMP2 0x00000010 /* implementation defined */ -#define L1_F_DOM(x) ((x) << 5) /* domain */ -#define L1_F_DOM_MASK L1_F_DOM(0xf) -#define L1_F_ADDR_MASK 0xfffff000 /* phys address of L2 Table */ - -#define L1_F_XSCALE_P 0x00000200 /* ECC enable for this section */ - -/* - * ARM L2 Descriptors - */ - -#define L2_TYPE_INV 0x00 /* Invalid (fault) */ -#define L2_TYPE_L 0x01 /* Large Page */ -#define L2_TYPE_S 0x02 /* Small Page */ -#define L2_TYPE_T 0x03 /* Tiny Page */ -#define L2_TYPE_MASK 0x03 /* mask of type bits */ - - /* - * This L2 Descriptor type is available on XScale processors - * when using a Coarse L1 Descriptor. The Extended Small - * Descriptor has the same format as the XScale Tiny Descriptor, - * but describes a 4K page, rather than a 1K page. 
- */ -#define L2_TYPE_XSCALE_XS 0x03 /* XScale Extended Small Page */ - -#define L2_B 0x00000004 /* Bufferable page */ -#define L2_C 0x00000008 /* Cacheable page */ -#define L2_AP0(x) ((x) << 4) /* access permissions (sp 0) */ -#define L2_AP1(x) ((x) << 6) /* access permissions (sp 1) */ -#define L2_AP2(x) ((x) << 8) /* access permissions (sp 2) */ -#define L2_AP3(x) ((x) << 10) /* access permissions (sp 3) */ - -#define L2_SHARED (1 << 10) -#define L2_APX (1 << 9) -#define L2_XN (1 << 0) -#define L2_L_TEX_MASK (0x7 << 12) /* Type Extension */ -#define L2_L_TEX(x) (((x) & 0x7) << 12) -#define L2_S_TEX_MASK (0x7 << 6) /* Type Extension */ -#define L2_S_TEX(x) (((x) & 0x7) << 6) - -#define L2_XSCALE_L_TEX(x) ((x) << 12) /* Type Extension */ -#define L2_XSCALE_L_S(x) (1 << 15) /* Shared */ -#define L2_XSCALE_T_TEX(x) ((x) << 6) /* Type Extension */ - -/* - * Access Permissions for L1 and L2 Descriptors. - */ -#define AP_W 0x01 /* writable */ -#define AP_REF 0x01 /* referenced flag */ -#define AP_U 0x02 /* user */ - -/* - * Short-hand for common AP_* constants. - * - * Note: These values assume the S (System) bit is set and - * the R (ROM) bit is clear in CP15 register 1. - */ -#define AP_KR 0x00 /* kernel read */ -#define AP_KRW 0x01 /* kernel read/write */ -#define AP_KRWUR 0x02 /* kernel read/write usr read */ -#define AP_KRWURW 0x03 /* kernel read/write usr read/write */ - -/* - * Domain Types for the Domain Access Control Register. - */ -#define DOMAIN_FAULT 0x00 /* no access */ -#define DOMAIN_CLIENT 0x01 /* client */ -#define DOMAIN_RESERVED 0x02 /* reserved */ -#define DOMAIN_MANAGER 0x03 /* manager */ - -/* - * Type Extension bits for XScale processors. 
- * - * Behavior of C and B when X == 0: - * - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 N N - - - * 0 1 N Y - - - * 1 0 Y Y Write-through Read Allocate - * 1 1 Y Y Write-back Read Allocate - * - * Behavior of C and B when X == 1: - * C B Cacheable Bufferable Write Policy Line Allocate Policy - * 0 0 - - - - DO NOT USE - * 0 1 N Y - - - * 1 0 Mini-Data - - - - * 1 1 Y Y Write-back R/W Allocate - */ -#define TEX_XSCALE_X 0x01 /* X modifies C and B */ -#define TEX_XSCALE_E 0x02 -#define TEX_XSCALE_T 0x04 - -/* Xscale core 3 */ - -/* - * - * Cache attributes with L2 present, S = 0 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y WT N Y - * 0 0 0 1 1 Y Y WB Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N WB N Y - * 1 X 0 1 0 Y N WT N Y - * 1 X 0 1 1 Y N WB Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WB Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WB Y Y - * - * - * - * - * Cache attributes with L2 present, S = 1 - * T E X C B L1 i-cache L1 d-cache L1 DC WP L2 cacheable write coalesce - * 0 0 0 0 0 N N - N N - * 0 0 0 0 1 N N - N Y - * 0 0 0 1 0 Y Y - N Y - * 0 0 0 1 1 Y Y WT Y Y - * 0 0 1 0 0 N N - Y Y - * 0 0 1 0 1 N N - N N - * 0 0 1 1 0 Y Y - - N - * 0 0 1 1 1 Y Y WT Y Y - * 0 1 0 0 0 N N - N N - * 0 1 0 0 1 N/A N/A N/A N/A N/A - * 0 1 0 1 0 N/A N/A N/A N/A N/A - * 0 1 0 1 1 N/A N/A N/A N/A N/A - * 0 1 1 X X N/A N/A N/A N/A N/A - * 1 X 0 0 0 N N - N Y - * 1 X 0 0 1 Y N - N Y - * 1 X 0 1 0 Y N - N Y - * 1 X 0 1 1 Y N - Y Y - * 1 X 1 0 0 N N - Y Y - * 1 X 1 0 1 Y Y WT Y Y - * 1 X 1 1 0 Y Y WT Y Y - * 1 X 1 1 1 Y Y WT Y Y - */ -#endif /* !_MACHINE_PTE_H_ */ -#endif /* __ARM_ARCH >= 6 */ - -/* End of 
pte.h */ -- cgit v1.2.3 From a569bb2f9fe4c067db3ba2f72c58ac53f9567a23 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 09:52:11 +0000 Subject: Remove AP_KRW definition not needed after r295801. --- sys/arm/include/pte-v6.h | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/sys/arm/include/pte-v6.h b/sys/arm/include/pte-v6.h index 212e25da622f..cc92b28f1aa2 100644 --- a/sys/arm/include/pte-v6.h +++ b/sys/arm/include/pte-v6.h @@ -288,14 +288,4 @@ #define PTE2_KERN(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_G, attr) #define PTE2_KERN_NG(pa, ap, attr) PTE2(pa, (ap) | PTE2_A | PTE2_NG, attr) - -// ----------------- TO BE DELETED --------------------------------------------- - -/* - * sys/arm/arm/elf_trampoline.c - */ -#define AP_KRW 0x01 /* kernel read/write */ - -// ----------------------------------------------------------------------------- - #endif /* !_MACHINE_PTE_V6_H_ */ -- cgit v1.2.3 From 3d5822f1a9885caddbdbce6bba5424b8a2c3ea9d Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 10:32:17 +0000 Subject: Remove not used definitions and fix some style nits. No functional changes. 
--- sys/arm/include/pmap-v6.h | 36 ++++++++++++------------------------ 1 file changed, 12 insertions(+), 24 deletions(-) diff --git a/sys/arm/include/pmap-v6.h b/sys/arm/include/pmap-v6.h index c771e2276a46..10296db2ccbf 100644 --- a/sys/arm/include/pmap-v6.h +++ b/sys/arm/include/pmap-v6.h @@ -88,21 +88,11 @@ typedef uint32_t ttb_entry_t; /* TTB entry */ */ #define NKPT2PG 32 #endif +#endif /* _KERNEL */ /* * Pmap stuff */ - -/* - * This structure is used to hold a virtual<->physical address - * association and is used mostly by bootstrap code - */ -struct pv_addr { - SLIST_ENTRY(pv_addr) pv_list; - vm_offset_t pv_va; - vm_paddr_t pv_pa; -}; -#endif struct pv_entry; struct pv_chunk; @@ -173,20 +163,18 @@ extern ttb_entry_t pmap_kern_ttb; /* TTB for kernel pmap */ * is called: pmap_kenter(), pmap_kextract(), pmap_kremove(), vtophys(), and * vtopte2(). */ -void pmap_bootstrap(vm_offset_t ); -void pmap_kenter(vm_offset_t , vm_paddr_t ); +void pmap_bootstrap(vm_offset_t); +void pmap_kenter(vm_offset_t, vm_paddr_t); void pmap_kremove(vm_offset_t); void *pmap_mapdev_attr(vm_paddr_t, vm_size_t, int); -boolean_t pmap_page_is_mapped(vm_page_t ); - -void pmap_tlb_flush(pmap_t , vm_offset_t ); -void pmap_tlb_flush_range(pmap_t , vm_offset_t , vm_size_t ); +boolean_t pmap_page_is_mapped(vm_page_t); -void pmap_dcache_wb_range(vm_paddr_t , vm_size_t , vm_memattr_t ); +void pmap_tlb_flush(pmap_t, vm_offset_t); +void pmap_tlb_flush_range(pmap_t, vm_offset_t, vm_size_t); vm_paddr_t pmap_dump_kextract(vm_offset_t, pt2_entry_t *); -int pmap_fault(pmap_t , vm_offset_t , uint32_t , int , bool); +int pmap_fault(pmap_t, vm_offset_t, uint32_t, int, bool); void pmap_set_tex(void); void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set); @@ -194,11 +182,11 @@ void reinit_mmu(ttb_entry_t ttb, u_int aux_clr, u_int aux_set); /* * Pre-bootstrap epoch functions set. 
*/ -void pmap_bootstrap_prepare(vm_paddr_t ); -vm_paddr_t pmap_preboot_get_pages(u_int ); -void pmap_preboot_map_pages(vm_paddr_t , vm_offset_t , u_int ); -vm_offset_t pmap_preboot_reserve_pages(u_int ); -vm_offset_t pmap_preboot_get_vpages(u_int ); +void pmap_bootstrap_prepare(vm_paddr_t); +vm_paddr_t pmap_preboot_get_pages(u_int); +void pmap_preboot_map_pages(vm_paddr_t, vm_offset_t, u_int); +vm_offset_t pmap_preboot_reserve_pages(u_int); +vm_offset_t pmap_preboot_get_vpages(u_int); void pmap_preboot_map_attr(vm_paddr_t, vm_offset_t, vm_size_t, vm_prot_t, vm_memattr_t); -- cgit v1.2.3 From b38523dc0f1865ffb7b801d2d42c8b06364561c8 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Fri, 19 Feb 2016 10:40:04 +0000 Subject: Remove not used static function pmap_kenter_attr(). --- sys/arm/arm/pmap-v6.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index 05e0137aa596..bb1b0ba1e79c 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -1267,13 +1267,6 @@ pmap_kenter_prot_attr(vm_offset_t va, vm_paddr_t pa, uint32_t prot, pte2_store(pte2p, PTE2_KERN(pa, prot, attr)); } -static __inline void -pmap_kenter_attr(vm_offset_t va, vm_paddr_t pa, int attr) -{ - - pmap_kenter_prot_attr(va, pa, PTE2_AP_KRW, attr); -} - PMAP_INLINE void pmap_kenter(vm_offset_t va, vm_paddr_t pa) { -- cgit v1.2.3 From 64a3a6304e31a60badf015637339f0d869f8b46e Mon Sep 17 00:00:00 2001 From: Michael Tuexen Date: Fri, 19 Feb 2016 11:25:18 +0000 Subject: Use the SCTP level pointer, not the interface level. 
MFC after: 3 days --- sys/netinet/sctp_pcb.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/sys/netinet/sctp_pcb.c b/sys/netinet/sctp_pcb.c index 84d83f4b2dc2..7e3218da687e 100644 --- a/sys/netinet/sctp_pcb.c +++ b/sys/netinet/sctp_pcb.c @@ -5432,7 +5432,7 @@ sctp_select_primary_destination(struct sctp_tcb *stcb) /* - * Delete the address from the endpoint local address list There is nothing + * Delete the address from the endpoint local address list. There is nothing * to be done if we are bound to all addresses */ void @@ -5483,8 +5483,7 @@ sctp_del_local_addr_ep(struct sctp_inpcb *inp, struct sctp_ifa *ifa) * to laddr */ TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) { - if (net->ro._s_addr && - (net->ro._s_addr->ifa == laddr->ifa)) { + if (net->ro._s_addr == laddr->ifa) { /* Yep, purge src address selected */ sctp_rtentry_t *rt; -- cgit v1.2.3 From a28a4d77b6ae826aec5331854d735c0f360c7115 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Fri, 19 Feb 2016 14:01:35 +0000 Subject: =?UTF-8?q?Fix=20possible=20out-of-bounds=20access=20detected=20by?= =?UTF-8?q?=20Ulrich=20Sp=C3=B6rleins=20"scan-build".=20Some=20invalid=20P?= =?UTF-8?q?CI=20device=20selectors=20could=20cause=20read=20access=20to=20?= =?UTF-8?q?an=20initialized=20variable=20next=20to=20the=20array=20(local?= =?UTF-8?q?=20loop=20index=20variable).?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit While here, the parser has been made more strict with regard to the syntax of PCI device selectors as documented in the man-page. E.g. "pci:" used to be interpreted as "pci0:0". 
MFC after: 3 days --- usr.sbin/pciconf/pciconf.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) diff --git a/usr.sbin/pciconf/pciconf.c b/usr.sbin/pciconf/pciconf.c index e743a891a2a6..d62ce77f6ccf 100644 --- a/usr.sbin/pciconf/pciconf.c +++ b/usr.sbin/pciconf/pciconf.c @@ -897,7 +897,6 @@ static struct pcisel parsesel(const char *str) { const char *ep; - const char *epbase; char *eppos; struct pcisel sel; unsigned long selarr[4]; @@ -909,30 +908,27 @@ parsesel(const char *str) else ep = str; - epbase = ep; - if (strncmp(ep, "pci", 3) == 0) { ep += 3; i = 0; - do { + while (isdigit(*ep) && i < 4) { selarr[i++] = strtoul(ep, &eppos, 10); ep = eppos; - } while ((*ep == ':' || *ep == '.') && *++ep != '\0' && i < 4); - - if (i > 2) - sel.pc_func = selarr[--i]; - else - sel.pc_func = 0; - sel.pc_dev = selarr[--i]; - sel.pc_bus = selarr[--i]; - if (i > 0) - sel.pc_domain = selarr[--i]; - else - sel.pc_domain = 0; + if (*ep == ':') { + ep++; + if (*ep == '\0') + i = 0; + } + } + if (i > 0 && *ep == '\0') { + sel.pc_func = (i > 2) ? selarr[--i] : 0; + sel.pc_dev = (i > 0) ? selarr[--i] : 0; + sel.pc_bus = (i > 0) ? selarr[--i] : 0; + sel.pc_domain = (i > 0) ? selarr[--i] : 0; + return (sel); + } } - if (*ep != '\x0' || ep == epbase) - errx(1, "cannot parse selector %s", str); - return sel; + errx(1, "cannot parse selector %s", str); } static struct pcisel -- cgit v1.2.3 From f9a32acb48e95ec4238c32577739af13b63c7d58 Mon Sep 17 00:00:00 2001 From: Andrew Turner Date: Fri, 19 Feb 2016 14:05:28 +0000 Subject: Include ofw_bus_subr.h before ofw_pci.h for the definition of struct ofw_bus_iinfo. 
Sponsored by: ABT Systems Ltd --- sys/arm/mv/mv_pci.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/arm/mv/mv_pci.c b/sys/arm/mv/mv_pci.c index 91ffa4702e0b..49d095d1820e 100644 --- a/sys/arm/mv/mv_pci.c +++ b/sys/arm/mv/mv_pci.c @@ -61,8 +61,8 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include +#include #include #include #include -- cgit v1.2.3 From 0bd11ac8f937906e1a61600d87755db386b32124 Mon Sep 17 00:00:00 2001 From: Andrew Turner Date: Fri, 19 Feb 2016 14:15:31 +0000 Subject: Add initial support for the Allwinner A31i and A31s. This just adds the FDT platform code to detect when we are booting on one of these SoCs. The driver changes will be added shortly. Submitted by: Emmanuel Vadot Differential Revision: https://reviews.freebsd.org/D5338 --- sys/arm/allwinner/allwinner_machdep.c | 35 +++++++++++++++++++++++++++++++++++ sys/arm/allwinner/allwinner_machdep.h | 3 +++ 2 files changed, 38 insertions(+) diff --git a/sys/arm/allwinner/allwinner_machdep.c b/sys/arm/allwinner/allwinner_machdep.c index 5e8e842a2b1d..1b584fe4cd18 100644 --- a/sys/arm/allwinner/allwinner_machdep.c +++ b/sys/arm/allwinner/allwinner_machdep.c @@ -75,6 +75,23 @@ a20_attach(platform_t plat) return (0); } +static int +a31_attach(platform_t plat) +{ + soc_type = ALLWINNERSOC_A31; + soc_family = ALLWINNERSOC_SUN6I; + + return (0); +} + +static int +a31s_attach(platform_t plat) +{ + soc_type = ALLWINNERSOC_A31S; + soc_family = ALLWINNERSOC_SUN6I; + + return (0); +} static vm_offset_t allwinner_lastaddr(platform_t plat) @@ -138,6 +155,22 @@ static platform_method_t a20_methods[] = { PLATFORMMETHOD_END, }; +static platform_method_t a31_methods[] = { + PLATFORMMETHOD(platform_attach, a31_attach), + PLATFORMMETHOD(platform_lastaddr, allwinner_lastaddr), + PLATFORMMETHOD(platform_devmap_init, allwinner_devmap_init), + + PLATFORMMETHOD_END, +}; + +static platform_method_t a31s_methods[] = { + PLATFORMMETHOD(platform_attach, a31s_attach), + 
PLATFORMMETHOD(platform_lastaddr, allwinner_lastaddr), + PLATFORMMETHOD(platform_devmap_init, allwinner_devmap_init), + + PLATFORMMETHOD_END, +}; + u_int allwinner_soc_type(void) { @@ -152,3 +185,5 @@ allwinner_soc_family(void) FDT_PLATFORM_DEF(a10, "a10", 0, "allwinner,sun4i-a10"); FDT_PLATFORM_DEF(a20, "a20", 0, "allwinner,sun7i-a20"); +FDT_PLATFORM_DEF(a31, "a31", 0, "allwinner,sun6i-a31"); +FDT_PLATFORM_DEF(a31s, "a31s", 0, "allwinner,sun6i-a31s"); diff --git a/sys/arm/allwinner/allwinner_machdep.h b/sys/arm/allwinner/allwinner_machdep.h index 8718d63bd3e6..c640494466e8 100644 --- a/sys/arm/allwinner/allwinner_machdep.h +++ b/sys/arm/allwinner/allwinner_machdep.h @@ -34,9 +34,12 @@ #define ALLWINNERSOC_A13 0x13000000 #define ALLWINNERSOC_A10S 0x10000001 #define ALLWINNERSOC_A20 0x20000000 +#define ALLWINNERSOC_A31 0x31000000 +#define ALLWINNERSOC_A31S 0x31000001 #define ALLWINNERSOC_SUN4I 0x40000000 #define ALLWINNERSOC_SUN5I 0x50000000 +#define ALLWINNERSOC_SUN6I 0x60000000 #define ALLWINNERSOC_SUN7I 0x70000000 u_int allwinner_soc_type(void); -- cgit v1.2.3 From 156e1855a4cd63cd0c13d006b7006870bc8b1ff2 Mon Sep 17 00:00:00 2001 From: Ed Maste Date: Fri, 19 Feb 2016 15:11:54 +0000 Subject: Remove objcopy in WITHOUT_TOOLCHAIN if it's from elftoolchain --- tools/build/mk/OptionalObsoleteFiles.inc | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/tools/build/mk/OptionalObsoleteFiles.inc b/tools/build/mk/OptionalObsoleteFiles.inc index 87a40ba583c9..0dec669b6274 100644 --- a/tools/build/mk/OptionalObsoleteFiles.inc +++ b/tools/build/mk/OptionalObsoleteFiles.inc @@ -7946,6 +7946,10 @@ OLD_FILES+=usr/share/man/man1/readelf.1.gz OLD_FILES+=usr/share/man/man1/size.1.gz OLD_FILES+=usr/share/man/man1/strings.1.gz OLD_FILES+=usr/share/man/man1/strip.1.gz +.if ${MK_ELFCOPY_AS_OBJCOPY} != no +OLD_FILES+=usr/bin/objcopy +OLD_FILES+=usr/share/man/man1/objcopy.1.gz +.endif .endif .if ${MK_TOOLCHAIN} == no || ${MK_ELFCOPY_AS_OBJCOPY} != no OLD_FILES+=usr/bin/elfcopy -- cgit 
v1.2.3 From 930143bd86568dec8ddf56c35154389549d71b78 Mon Sep 17 00:00:00 2001 From: "Pedro F. Giffuni" Date: Fri, 19 Feb 2016 15:35:20 +0000 Subject: firewire: fix a mismatch introduced in r230558. Found by: PVS Static Analysis Reviewed by: sbruno MFC after: 1 month --- sys/dev/firewire/sbp_targ.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/firewire/sbp_targ.c b/sys/dev/firewire/sbp_targ.c index 0d78e9f1e8b3..bd7e924f40d1 100644 --- a/sys/dev/firewire/sbp_targ.c +++ b/sys/dev/firewire/sbp_targ.c @@ -1324,7 +1324,7 @@ sbp_targ_action1(struct cam_sim *sim, union ccb *ccb) | PIT_DISCONNECT | PIT_TERM_IO; cpi->transport = XPORT_SPI; /* FIXME add XPORT_FW type to cam */ - cpi->hba_misc = PIM_NOBUSRESET | PIM_NOBUSRESET; + cpi->hba_misc = PIM_NOBUSRESET | PIM_NO_6_BYTE; cpi->hba_eng_cnt = 0; cpi->max_target = 7; /* XXX */ cpi->max_lun = MAX_LUN - 1; -- cgit v1.2.3 From 308c3c240f6cc1a1023b859374c37656658b115e Mon Sep 17 00:00:00 2001 From: "Pedro F. Giffuni" Date: Fri, 19 Feb 2016 15:53:08 +0000 Subject: Ext2: cleanup setting of ctime/mtime/birthtime. This adopts the same change as r291936 for UFS. Directly clear IN_ACCESS or IN_UPDATE when user supplied the time, and copy the value into the inode. This keeps the behaviour cleaner and is consistent with UFS. 
Reviewed by: bde MFC after: 1 month (only 10) --- sys/fs/ext2fs/ext2_vnops.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sys/fs/ext2fs/ext2_vnops.c b/sys/fs/ext2fs/ext2_vnops.c index 42e11c7174ac..7c4d3aeb3f10 100644 --- a/sys/fs/ext2fs/ext2_vnops.c +++ b/sys/fs/ext2fs/ext2_vnops.c @@ -464,16 +464,14 @@ ext2_setattr(struct vop_setattr_args *ap) ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || (error = VOP_ACCESS(vp, VWRITE, cred, td)))) return (error); - if (vap->va_atime.tv_sec != VNOVAL) - ip->i_flag |= IN_ACCESS; - if (vap->va_mtime.tv_sec != VNOVAL) - ip->i_flag |= IN_CHANGE | IN_UPDATE; - ext2_itimes(vp); + ip->i_flag |= IN_CHANGE | IN_MODIFIED; if (vap->va_atime.tv_sec != VNOVAL) { + ip->i_flag &= ~IN_ACCESS; ip->i_atime = vap->va_atime.tv_sec; ip->i_atimensec = vap->va_atime.tv_nsec; } if (vap->va_mtime.tv_sec != VNOVAL) { + ip->i_flag &= ~IN_UPDATE; ip->i_mtime = vap->va_mtime.tv_sec; ip->i_mtimensec = vap->va_mtime.tv_nsec; } -- cgit v1.2.3 From 755ae20cd769b655271bb07c659fee633e2bfeeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Fri, 19 Feb 2016 16:43:03 +0000 Subject: Remove redundant check for "(dinfo != NULL)", it has already been performed as the first part of this complex loop conditional. Found by: PVS Static Analysis --- sys/dev/pci/pci_user.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c index e69fde25e0b8..ecfa2e293c3b 100644 --- a/sys/dev/pci/pci_user.c +++ b/sys/dev/pci/pci_user.c @@ -709,9 +709,9 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t * that match the user's criteria. 
*/ for (cio->num_matches = 0, error = 0, i = 0, - dinfo = STAILQ_FIRST(devlist_head); - (dinfo != NULL) && (cio->num_matches < ionum) - && (error == 0) && (i < pci_numdevs) && (dinfo != NULL); + dinfo = STAILQ_FIRST(devlist_head); + (dinfo != NULL) && (cio->num_matches < ionum) && + (error == 0) && (i < pci_numdevs)); dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { if (i < cio->offset) -- cgit v1.2.3 From 5a1a8ad938ea46ef5fe7f781731c7a7915964777 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Stefan=20E=C3=9Fer?= Date: Fri, 19 Feb 2016 16:53:21 +0000 Subject: Fix syntax error introduced in previous commit where I removed one character to few. I should have waited for the kernel compile to finish, even though the change seemed so trivial. --- sys/dev/pci/pci_user.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/pci/pci_user.c b/sys/dev/pci/pci_user.c index ecfa2e293c3b..f5a921a75ff1 100644 --- a/sys/dev/pci/pci_user.c +++ b/sys/dev/pci/pci_user.c @@ -711,7 +711,7 @@ pci_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *t for (cio->num_matches = 0, error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); (dinfo != NULL) && (cio->num_matches < ionum) && - (error == 0) && (i < pci_numdevs)); + (error == 0) && (i < pci_numdevs); dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { if (i < cio->offset) -- cgit v1.2.3 From a3ec59c20a0c32d7c758f078750903ee585d05b5 Mon Sep 17 00:00:00 2001 From: Jilles Tjoelker Date: Fri, 19 Feb 2016 16:56:07 +0000 Subject: sh: Add tests for comments in sh -c. 
--- bin/sh/tests/parser/Makefile | 2 ++ bin/sh/tests/parser/comment1.0 | 3 +++ bin/sh/tests/parser/comment2.42 | 4 ++++ 3 files changed, 9 insertions(+) create mode 100644 bin/sh/tests/parser/comment1.0 create mode 100644 bin/sh/tests/parser/comment2.42 diff --git a/bin/sh/tests/parser/Makefile b/bin/sh/tests/parser/Makefile index 0d2ca0f9f83f..78a0734d9be7 100644 --- a/bin/sh/tests/parser/Makefile +++ b/bin/sh/tests/parser/Makefile @@ -25,6 +25,8 @@ FILES+= alias15.0 alias15.0.stdout FILES+= and-pipe-not.0 FILES+= case1.0 FILES+= case2.0 +FILES+= comment1.0 +FILES+= comment2.42 FILES+= dollar-quote1.0 FILES+= dollar-quote2.0 FILES+= dollar-quote3.0 diff --git a/bin/sh/tests/parser/comment1.0 b/bin/sh/tests/parser/comment1.0 new file mode 100644 index 000000000000..21e7ade957bc --- /dev/null +++ b/bin/sh/tests/parser/comment1.0 @@ -0,0 +1,3 @@ +# $FreeBSD$ + +${SH} -c '#' diff --git a/bin/sh/tests/parser/comment2.42 b/bin/sh/tests/parser/comment2.42 new file mode 100644 index 000000000000..196b73354493 --- /dev/null +++ b/bin/sh/tests/parser/comment2.42 @@ -0,0 +1,4 @@ +# $FreeBSD$ + +${SH} -c '# +exit 42' -- cgit v1.2.3 From e0f6860d51895bd349172c6f714862755c63e9ca Mon Sep 17 00:00:00 2001 From: "Pedro F. Giffuni" Date: Fri, 19 Feb 2016 18:05:02 +0000 Subject: qlxgb: fix mismatch. 
Found by: PVS Static Analysis Reviewed by: davidcs MFC after: 1 month --- sys/dev/qlxgb/qla_hw.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sys/dev/qlxgb/qla_hw.c b/sys/dev/qlxgb/qla_hw.c index e9c635d03fc9..d71bb296c24a 100644 --- a/sys/dev/qlxgb/qla_hw.c +++ b/sys/dev/qlxgb/qla_hw.c @@ -797,7 +797,8 @@ qla_tx_tso(qla_host_t *ha, struct mbuf *mp, q80_tx_cmd_t *tx_cmd, uint8_t *hdr) } if ((*tcp_opt != 0x01) || (*(tcp_opt + 1) != 0x01) || - (*(tcp_opt + 2) != 0x08) || (*(tcp_opt + 2) != 10)) { + (*(tcp_opt + 2) != 0x08) || + (*(tcp_opt + 3) != 10)) { return -1; } } -- cgit v1.2.3 From 34b133bcd50a6c2e2dbb59817323d97007e122ba Mon Sep 17 00:00:00 2001 From: David C Somayajulu Date: Fri, 19 Feb 2016 21:32:49 +0000 Subject: Modified the use of bxe_grc_dump() function so that it can be invoked directly at any potential error path, where a fwdump is needed. The fwdump (a.k.a grcdump) is stored in a driver buffer. The sysctl grcdump_done indicates if a fwdump was taken and waiting to be retrieved. The sysctl trigger_grcdump can be used to manually trigger a fwdump. 
MFC after:5 days --- sys/dev/bxe/bxe.c | 96 ++++++++++++++++++++++++++++++++----------------------- sys/dev/bxe/bxe.h | 2 -- 2 files changed, 56 insertions(+), 42 deletions(-) diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c index ddaa88a6156b..ef52731ffe20 100644 --- a/sys/dev/bxe/bxe.c +++ b/sys/dev/bxe/bxe.c @@ -738,6 +738,7 @@ static void bxe_handle_fp_tq(void *context, int pending); static int bxe_add_cdev(struct bxe_softc *sc); static void bxe_del_cdev(struct bxe_softc *sc); +static int bxe_grc_dump(struct bxe_softc *sc); /* calculate crc32 on a buffer (NOTE: crc32_length MUST be aligned to 8) */ uint32_t @@ -7934,6 +7935,16 @@ bxe_chk_parity_attn(struct bxe_softc *sc, attn.sig[2] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4); attn.sig[3] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4); + /* + * Since MCP attentions can't be disabled inside the block, we need to + * read AEU registers to see whether they're currently disabled + */ + attn.sig[3] &= ((REG_RD(sc, (!port ? MISC_REG_AEU_ENABLE4_FUNC_0_OUT_0 + : MISC_REG_AEU_ENABLE4_FUNC_1_OUT_0)) & + MISC_AEU_ENABLE_MCP_PRTY_BITS) | + ~MISC_AEU_ENABLE_MCP_PRTY_BITS); + + if (!CHIP_IS_E1x(sc)) attn.sig[4] = REG_RD(sc, MISC_REG_AEU_AFTER_INVERT_5_FUNC_0 + port*4); @@ -16147,6 +16158,30 @@ bxe_sysctl_state(SYSCTL_HANDLER_ARGS) return (error); } +static int +bxe_sysctl_trigger_grcdump(SYSCTL_HANDLER_ARGS) +{ + struct bxe_softc *sc; + int error, result; + + result = 0; + error = sysctl_handle_int(oidp, &result, 0, req); + + if (error || !req->newptr) { + return (error); + } + + if (result == 1) { + sc = (struct bxe_softc *)arg1; + + BLOGI(sc, "... grcdump start ...\n"); + bxe_grc_dump(sc); + BLOGI(sc, "... 
grcdump done ...\n"); + } + + return (error); +} + static int bxe_sysctl_eth_stat(SYSCTL_HANDLER_ARGS) { @@ -16279,11 +16314,15 @@ bxe_add_sysctls(struct bxe_softc *sc) CTLFLAG_RW, &sc->debug, "debug logging mode"); - sc->trigger_grcdump = 0; - SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "trigger_grcdump", - CTLFLAG_RW, &sc->trigger_grcdump, 0, + SYSCTL_ADD_PROC(ctx, children, OID_AUTO, "trigger_grcdump", + CTLTYPE_UINT | CTLFLAG_RW, sc, 0, + bxe_sysctl_trigger_grcdump, "IU", "set by driver when a grcdump is needed"); + sc->grcdump_done = 0; + SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "grcdump_done", + CTLFLAG_RW, &sc->grcdump_done, 0, + "set by driver when grcdump is done"); sc->rx_budget = bxe_rx_budget; SYSCTL_ADD_UINT(ctx, children, OID_AUTO, "rx_budget", @@ -18900,26 +18939,6 @@ bxe_get_preset_regs_len(struct bxe_softc *sc, uint32_t preset) return 0; } -static int -bxe_get_max_regs_len(struct bxe_softc *sc) -{ - uint32_t preset_idx; - int regdump_len32, len32; - - regdump_len32 = bxe_get_preset_regs_len(sc, 1); - - /* Calculate the total preset regs length */ - for (preset_idx = 2; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { - - len32 = bxe_get_preset_regs_len(sc, preset_idx); - - if (regdump_len32 < len32) - regdump_len32 = len32; - } - - return regdump_len32; -} - static int bxe_get_total_regs_len32(struct bxe_softc *sc) { @@ -19147,18 +19166,21 @@ bxe_get_preset_regs(struct bxe_softc *sc, uint32_t *p, uint32_t preset) } static int -bxe_grc_dump(struct bxe_softc *sc, bxe_grcdump_t *dump) +bxe_grc_dump(struct bxe_softc *sc) { int rval = 0; uint32_t preset_idx; uint8_t *buf; uint32_t size; struct dump_header *d_hdr; + + if (sc->grcdump_done) + return (rval); ecore_disable_blocks_parity(sc); - buf = dump->grcdump; - d_hdr = dump->grcdump; + buf = sc->grc_dump; + d_hdr = sc->grc_dump; d_hdr->header_size = (sizeof(struct dump_header) >> 2) - 1; d_hdr->version = BNX2X_DUMP_VERSION; @@ -19179,7 +19201,6 @@ bxe_grc_dump(struct bxe_softc *sc, bxe_grcdump_t *dump) 
(BXE_PATH(sc) ? DUMP_PATH_1 : DUMP_PATH_0); } - dump->grcdump_dwords = sizeof(struct dump_header) >> 2; buf += sizeof(struct dump_header); for (preset_idx = 1; preset_idx <= DUMP_MAX_PRESETS; preset_idx++) { @@ -19196,13 +19217,6 @@ bxe_grc_dump(struct bxe_softc *sc, bxe_grcdump_t *dump) size = bxe_get_preset_regs_len(sc, preset_idx) * (sizeof (uint32_t)); - rval = copyout(sc->grc_dump, buf, size); - - if (rval) - break; - - dump->grcdump_dwords += (size / (sizeof (uint32_t))); - buf += size; } @@ -19216,11 +19230,12 @@ bxe_grc_dump(struct bxe_softc *sc, bxe_grcdump_t *dump) static int bxe_add_cdev(struct bxe_softc *sc) { - int max_preset_size; + int grc_dump_size; - max_preset_size = bxe_get_max_regs_len(sc) * (sizeof (uint32_t)); + grc_dump_size = (bxe_get_total_regs_len32(sc) * sizeof(uint32_t)) + + sizeof(struct dump_header); - sc->grc_dump = malloc(max_preset_size, M_DEVBUF, M_NOWAIT); + sc->grc_dump = malloc(grc_dump_size, M_DEVBUF, M_NOWAIT); if (sc->grc_dump == NULL) return (-1); @@ -19288,12 +19303,13 @@ bxe_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, sizeof(struct dump_header); if ((sc->grc_dump == NULL) || (dump->grcdump == NULL) || - (dump->grcdump_size < grc_dump_size)) { + (dump->grcdump_size < grc_dump_size) || (!sc->grcdump_done)) { rval = EINVAL; break; } - - rval = bxe_grc_dump(sc, dump); + dump->grcdump_dwords = grc_dump_size >> 2; + rval = copyout(sc->grc_dump, dump->grcdump, grc_dump_size); + sc->grcdump_done = 0; break; diff --git a/sys/dev/bxe/bxe.h b/sys/dev/bxe/bxe.h index f954ec0f1a72..ea4006441a91 100644 --- a/sys/dev/bxe/bxe.h +++ b/sys/dev/bxe/bxe.h @@ -1833,7 +1833,6 @@ struct bxe_softc { struct cdev *ioctl_dev; void *grc_dump; - int trigger_grcdump; int grcdump_done; }; /* struct bxe_softc */ @@ -2301,7 +2300,6 @@ void ecore_storm_memset_struct(struct bxe_softc *sc, uint32_t addr, "ERROR: " format, \ ## args); \ } \ - sc->trigger_grcdump |= 0x1; \ } while(0) #ifdef ECORE_STOP_ON_ERROR -- cgit v1.2.3 From 
dcd95d8a010c87b954128824c40a6cb3b28a3688 Mon Sep 17 00:00:00 2001 From: Jilles Tjoelker Date: Fri, 19 Feb 2016 21:53:12 +0000 Subject: sh: Rework code to remove '\0' from shell input. This fixes bugs where '\0' was not removed correctly and speeds up the parser. --- bin/sh/input.c | 52 ++++++++++++++++++++------------------------ bin/sh/tests/parser/Makefile | 1 + bin/sh/tests/parser/nul1.0 | 12 ++++++++++ 3 files changed, 37 insertions(+), 28 deletions(-) create mode 100644 bin/sh/tests/parser/nul1.0 diff --git a/bin/sh/input.c b/bin/sh/input.c index 412f1442b08c..5921e082da09 100644 --- a/bin/sh/input.c +++ b/bin/sh/input.c @@ -195,8 +195,7 @@ retry: int preadbuffer(void) { - char *p, *q; - int more; + char *p, *q, *r, *end; char savec; while (parsefile->strpush) { @@ -224,34 +223,31 @@ again: } } - q = p = parsefile->buf + (parsenextc - parsefile->buf); - - /* delete nul characters */ - for (more = 1; more;) { - switch (*p) { - case '\0': - p++; /* Skip nul */ - goto check; - - case '\n': - parsenleft = q - parsenextc; - more = 0; /* Stop processing here */ - break; - - default: - break; - } - - *q++ = *p++; -check: - if (--parselleft <= 0) { - parsenleft = q - parsenextc - 1; - if (parsenleft < 0) - goto again; - *q = '\0'; - more = 0; + p = parsefile->buf + (parsenextc - parsefile->buf); + end = p + parselleft; + *end = '\0'; + q = strchrnul(p, '\n'); + if (q != end && *q == '\0') { + /* delete nul characters */ + for (r = q; q != end; q++) { + if (*q != '\0') + *r++ = *q; } + parselleft -= end - r; + if (parselleft == 0) + goto again; + end = p + parselleft; + *end = '\0'; + q = strchrnul(p, '\n'); + } + if (q == end) { + parsenleft = parselleft; + parselleft = 0; + } else /* *q == '\n' */ { + parsenleft = q - parsenextc + 1; + parselleft -= parsenleft; } + parsenleft--; savec = *q; *q = '\0'; diff --git a/bin/sh/tests/parser/Makefile b/bin/sh/tests/parser/Makefile index 78a0734d9be7..eb1e6f567b5c 100644 --- a/bin/sh/tests/parser/Makefile +++ 
b/bin/sh/tests/parser/Makefile @@ -73,6 +73,7 @@ FILES+= line-cont10.0 FILES+= line-cont11.0 FILES+= no-space1.0 FILES+= no-space2.0 +FILES+= nul1.0 FILES+= only-redir1.0 FILES+= only-redir2.0 FILES+= only-redir3.0 diff --git a/bin/sh/tests/parser/nul1.0 b/bin/sh/tests/parser/nul1.0 new file mode 100644 index 000000000000..49c5ab1b0cfb --- /dev/null +++ b/bin/sh/tests/parser/nul1.0 @@ -0,0 +1,12 @@ +# $FreeBSD$ +# Although POSIX does not specify the effect of NUL bytes in scripts, +# we ignore them. + +{ + printf 'v=%03000d\0%02000d' 7 2 + dd if=/dev/zero bs=1000 count=1 status=none + printf '1 w=%03000d%02000d1\0\n' 7 2 + printf '\0l\0v\0=\0$\0{\0#\0v\0}\n' + printf '\0l\0w\0=\0\0$\0{\0#\0w}\0\0\0\n' + printf '[ "$lv.$lw.$v" = "5001.5001.$w" ]\n' +} | ${SH} -- cgit v1.2.3 From c11f1016ec26976f028c36f3f63221f3391f6fa8 Mon Sep 17 00:00:00 2001 From: Eric Joyner Date: Fri, 19 Feb 2016 21:58:14 +0000 Subject: ixl(4): Fix two important RSS bugs. - Change tc_mapping field to assign 64 queues instead of 16 to the PF's VSI; add comments to describe how this is done. - Set hash lut size to 512 when setting filter control; the lut size defaults to 128 if this isn't set. 
Differential Revision: https://reviews.freebsd.org/D5203 Reviewed by: gallatin Tested by: jeffrey.e.pieper@intel.com Sponsored by: Intel Corporation --- sys/dev/ixl/if_ixl.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index c3c69d647259..b7804ce5b60b 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -1175,6 +1175,7 @@ ixl_init_locked(struct ixl_pf *pf) #ifdef IXL_FDIR filter.enable_fdir = TRUE; #endif + filter.hash_lut_size = I40E_HASH_LUT_SIZE_512; if (i40e_set_filter_control(hw, &filter)) device_printf(dev, "set_filter_control() failed\n"); @@ -2758,8 +2759,17 @@ ixl_initialize_vsi(struct ixl_vsi *vsi) */ ctxt.info.valid_sections = I40E_AQ_VSI_PROP_QUEUE_MAP_VALID; ctxt.info.mapping_flags |= I40E_AQ_VSI_QUE_MAP_CONTIG; - ctxt.info.queue_mapping[0] = 0; - ctxt.info.tc_mapping[0] = 0x0800; + /* In contig mode, que_mapping[0] is first queue index used by this VSI */ + ctxt.info.queue_mapping[0] = 0; + /* + * This VSI will only use traffic class 0; start traffic class 0's + * queue allocation at queue 0, and assign it 64 (2^6) queues (though + * the driver may not use all of them). + */ + ctxt.info.tc_mapping[0] = ((0 << I40E_AQ_VSI_TC_QUE_OFFSET_SHIFT) + & I40E_AQ_VSI_TC_QUE_OFFSET_MASK) | + ((6 << I40E_AQ_VSI_TC_QUE_NUMBER_SHIFT) + & I40E_AQ_VSI_TC_QUE_NUMBER_MASK); /* Set VLAN receive stripping mode */ ctxt.info.valid_sections |= I40E_AQ_VSI_PROP_VLAN_VALID; -- cgit v1.2.3 From 4ab0941dedab44efe637c4305e1c763d12ac1d45 Mon Sep 17 00:00:00 2001 From: Bryan Drewery Date: Fri, 19 Feb 2016 22:28:45 +0000 Subject: DIRDEPS_BUILD: Enable the post-build footer/stats display. There is no real downside to this and it is useful to have enabled. 
Sponsored by: EMC / Isilon Storage Division --- share/mk/local.meta.sys.mk | 1 + 1 file changed, 1 insertion(+) diff --git a/share/mk/local.meta.sys.mk b/share/mk/local.meta.sys.mk index 1aa749abd231..21b9c165fbbe 100644 --- a/share/mk/local.meta.sys.mk +++ b/share/mk/local.meta.sys.mk @@ -204,6 +204,7 @@ CSU_DIR := ${CSU_DIR.${MACHINE_ARCH}} .if !empty(TIME_STAMP) TRACER= ${TIME_STAMP} ${:U} .endif +WITH_META_STATS= t # toolchains can be a pain - especially bootstrappping them .if ${MACHINE} == "host" -- cgit v1.2.3 From 300d1454801278e771899077fa136644c7cef561 Mon Sep 17 00:00:00 2001 From: Eric Joyner Date: Fri, 19 Feb 2016 22:33:50 +0000 Subject: ixl(4): Remove unsupported device IDs. There is no official support for 20G SKUs on FreeBSD, and the KX_A device ID was never used. Differential Revision: https://reviews.freebsd.org/D5204 Reviewed by: sbruno, jeffrey.e.pieper@intel.com Sponsored by: Intel Corporation --- sys/dev/ixl/if_ixl.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index b7804ce5b60b..a11200a228a5 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -63,7 +63,6 @@ char ixl_driver_version[] = "1.4.3"; static ixl_vendor_info_t ixl_vendor_info_array[] = { {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_XL710, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_A, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_B, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_KX_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_A, 0, 0, 0}, @@ -71,8 +70,6 @@ static ixl_vendor_info_t ixl_vendor_info_array[] = {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_QSFP_C, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_10G_BASE_T4, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2, 0, 0, 0}, - {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_20G_KR2_A, 0, 0, 0}, #ifdef X722_SUPPORT {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_SFP_X722, 0, 0, 0}, {I40E_INTEL_VENDOR_ID, I40E_DEV_ID_1G_BASE_T_X722, 0, 0, 
0}, -- cgit v1.2.3 From 42d7398e7eb94326a335055f6f165f310afe5c69 Mon Sep 17 00:00:00 2001 From: Eric Joyner Date: Fri, 19 Feb 2016 22:45:09 +0000 Subject: ixl(4): Fix errors in queue interrupt setup in MSIX mode. - I40E_PFINT_DYN_CTLN needs to be cleared, and not have a queue index written to it. - The interrupt linked list for each queue is changed to only include the queue's Rx and Tx queues. Differential Revision: https://reviews.freebsd.org/D5206 Reviewed by: sbruno Tested by: jeffrey.e.pieper@intel.com Sponsored by: Intel Corporation --- sys/dev/ixl/if_ixl.c | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/sys/dev/ixl/if_ixl.c b/sys/dev/ixl/if_ixl.c index a11200a228a5..f972c5fe2a6e 100644 --- a/sys/dev/ixl/if_ixl.c +++ b/sys/dev/ixl/if_ixl.c @@ -2246,7 +2246,8 @@ ixl_configure_msix(struct ixl_pf *pf) /* Next configure the queues */ for (int i = 0; i < vsi->num_queues; i++, vector++) { - wr32(hw, I40E_PFINT_DYN_CTLN(i), i); + wr32(hw, I40E_PFINT_DYN_CTLN(i), 0); + /* First queue type is RX / type 0 */ wr32(hw, I40E_PFINT_LNKLSTN(i), i); reg = I40E_QINT_RQCTL_CAUSE_ENA_MASK | @@ -2259,11 +2260,8 @@ ixl_configure_msix(struct ixl_pf *pf) reg = I40E_QINT_TQCTL_CAUSE_ENA_MASK | (IXL_TX_ITR << I40E_QINT_TQCTL_ITR_INDX_SHIFT) | (vector << I40E_QINT_TQCTL_MSIX_INDX_SHIFT) | - ((i+1) << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | + (IXL_QUEUE_EOL << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT) | (I40E_QUEUE_TYPE_RX << I40E_QINT_TQCTL_NEXTQ_TYPE_SHIFT); - if (i == (vsi->num_queues - 1)) - reg |= (IXL_QUEUE_EOL - << I40E_QINT_TQCTL_NEXTQ_INDX_SHIFT); wr32(hw, I40E_QINT_TQCTL(i), reg); } } -- cgit v1.2.3 From bb329d4b5acf67a10caf9a156b9ec476c2b7c2d9 Mon Sep 17 00:00:00 2001 From: David C Somayajulu Date: Fri, 19 Feb 2016 22:46:52 +0000 Subject: Remove dead code. Code Cleanup. 
Improve clarity in debug messages MFC after:5 days --- sys/dev/bxe/bxe.c | 913 ++++++----------------------------------------- sys/dev/bxe/bxe.h | 62 ---- sys/dev/bxe/bxe_stats.c | 192 +--------- sys/dev/bxe/ecore_init.h | 7 - 4 files changed, 110 insertions(+), 1064 deletions(-) diff --git a/sys/dev/bxe/bxe.c b/sys/dev/bxe/bxe.c index ef52731ffe20..8ee2d3de4700 100644 --- a/sys/dev/bxe/bxe.c +++ b/sys/dev/bxe/bxe.c @@ -124,14 +124,6 @@ static struct bxe_device_type bxe_devs[] = { PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57712 MF 10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57712_VF, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic NetXtreme II BCM57712 VF 10GbE" - }, -#endif { BRCM_VENDORID, CHIP_NUM_57800, @@ -144,14 +136,6 @@ static struct bxe_device_type bxe_devs[] = { PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57800 MF 10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57800_VF, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic NetXtreme II BCM57800 VF 10GbE" - }, -#endif { BRCM_VENDORID, CHIP_NUM_57810, @@ -164,14 +148,6 @@ static struct bxe_device_type bxe_devs[] = { PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57810 MF 10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57810_VF, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic NetXtreme II BCM57810 VF 10GbE" - }, -#endif { BRCM_VENDORID, CHIP_NUM_57811, @@ -184,42 +160,18 @@ static struct bxe_device_type bxe_devs[] = { PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57811 MF 10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57811_VF, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic NetXtreme II BCM57811 VF 10GbE" - }, -#endif { BRCM_VENDORID, CHIP_NUM_57840_4_10, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 4x10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57840_2_20, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic NetXtreme II BCM57840 2x20GbE" - }, -#endif { BRCM_VENDORID, CHIP_NUM_57840_MF, PCI_ANY_ID, PCI_ANY_ID, "QLogic NetXtreme II BCM57840 MF 10GbE" }, -#if 0 - { - BRCM_VENDORID, - CHIP_NUM_57840_VF, - PCI_ANY_ID, PCI_ANY_ID, - "QLogic 
NetXtreme II BCM57840 VF 10GbE" - }, -#endif { 0, 0, 0, 0, NULL } @@ -245,10 +197,6 @@ static device_method_t bxe_methods[] = { DEVMETHOD(device_attach, bxe_attach), DEVMETHOD(device_detach, bxe_detach), DEVMETHOD(device_shutdown, bxe_shutdown), -#if 0 - DEVMETHOD(device_suspend, bxe_suspend), - DEVMETHOD(device_resume, bxe_resume), -#endif /* Bus interface (bus_if.h) */ DEVMETHOD(bus_print_child, bus_generic_print_child), DEVMETHOD(bus_driver_added, bus_generic_driver_added), @@ -458,12 +406,6 @@ static const struct { 8, STATS_FLAGS_FUNC, "tpa_aggregated_frames"}, { STATS_OFFSET32(total_tpa_bytes_hi), 8, STATS_FLAGS_FUNC, "tpa_bytes"}, -#if 0 - { STATS_OFFSET32(recoverable_error), - 4, STATS_FLAGS_FUNC, "recoverable_errors" }, - { STATS_OFFSET32(unrecoverable_error), - 4, STATS_FLAGS_FUNC, "unrecoverable_errors" }, -#endif { STATS_OFFSET32(eee_tx_lpi), 4, STATS_FLAGS_PORT, "eee_tx_lpi"}, { STATS_OFFSET32(rx_calls), @@ -516,12 +458,6 @@ static const struct { 4, STATS_FLAGS_FUNC, "tx_window_violation_std"}, { STATS_OFFSET32(tx_window_violation_tso), 4, STATS_FLAGS_FUNC, "tx_window_violation_tso"}, -#if 0 - { STATS_OFFSET32(tx_unsupported_tso_request_ipv6), - 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_ipv6"}, - { STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), - 4, STATS_FLAGS_FUNC, "tx_unsupported_tso_request_not_tcp"}, -#endif { STATS_OFFSET32(tx_chain_lost_mbuf), 4, STATS_FLAGS_FUNC, "tx_chain_lost_mbuf"}, { STATS_OFFSET32(tx_frames_deferred), @@ -633,12 +569,6 @@ static const struct { 4, "tx_window_violation_std"}, { Q_STATS_OFFSET32(tx_window_violation_tso), 4, "tx_window_violation_tso"}, -#if 0 - { Q_STATS_OFFSET32(tx_unsupported_tso_request_ipv6), - 4, "tx_unsupported_tso_request_ipv6"}, - { Q_STATS_OFFSET32(tx_unsupported_tso_request_not_tcp), - 4, "tx_unsupported_tso_request_not_tcp"}, -#endif { Q_STATS_OFFSET32(tx_chain_lost_mbuf), 4, "tx_chain_lost_mbuf"}, { Q_STATS_OFFSET32(tx_frames_deferred), @@ -906,12 +836,6 @@ bxe_dma_map_addr(void *arg, 
bus_dma_segment_t *segs, int nseg, int error) } else { dma->paddr = segs->ds_addr; dma->nseg = nseg; -#if 0 - BLOGD(dma->sc, DBG_LOAD, - "DMA alloc '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n", - dma->msg, dma->vaddr, (void *)dma->paddr, - dma->nseg, dma->size); -#endif } } @@ -996,13 +920,6 @@ bxe_dma_free(struct bxe_softc *sc, struct bxe_dma *dma) { if (dma->size > 0) { -#if 0 - BLOGD(sc, DBG_LOAD, - "DMA free '%s': vaddr=%p paddr=%p nseg=%d size=%lu\n", - dma->msg, dma->vaddr, (void *)dma->paddr, - dma->nseg, dma->size); -#endif - DBASSERT(sc, (dma->tag != NULL), ("dma tag is NULL")); bus_dmamap_sync(dma->tag, dma->map, @@ -1043,69 +960,6 @@ bxe_reg_rd_ind(struct bxe_softc *sc, return (val); } -#if 0 -void bxe_dp_dmae(struct bxe_softc *sc, struct dmae_command *dmae, int msglvl) -{ - uint32_t src_type = dmae->opcode & DMAE_COMMAND_SRC; - - switch (dmae->opcode & DMAE_COMMAND_DST) { - case DMAE_CMD_DST_PCI: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%x:%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - case DMAE_CMD_DST_GRC: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%x:%08x], len [%d*4], dst_addr [%08x]\n" - "comp_addr [%x:%08x], comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src [%08x], len [%d*4], dst [%08x]\n" - "comp_addr [%x:%08x], 
comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->dst_addr_lo >> 2, - dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - default: - if (src_type == DMAE_CMD_SRC_PCI) - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src_addr [%x:%08x] len [%d * 4] dst_addr [none]\n" - "comp_addr [%x:%08x] comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - else - DP(msglvl, "DMAE: opcode 0x%08x\n" - "src_addr [%08x] len [%d * 4] dst_addr [none]\n" - "comp_addr [%x:%08x] comp_val 0x%08x\n", - dmae->opcode, dmae->src_addr_lo >> 2, - dmae->len, dmae->comp_addr_hi, dmae->comp_addr_lo, - dmae->comp_val); - break; - } - -} -#endif - static int bxe_acquire_hw_lock(struct bxe_softc *sc, uint32_t resource) @@ -1118,7 +972,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - BLOGE(sc, "resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE\n", resource); + BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" + " resource_bit 0x%x\n", resource, resource_bit); return (-1); } @@ -1132,8 +987,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, /* validate the resource is not already taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (lock_status & resource_bit) { - BLOGE(sc, "resource in use (status 0x%x bit 0x%x)\n", - lock_status, resource_bit); + BLOGE(sc, "resource (0x%x) in use (status 0x%x bit 0x%x)\n", + resource, lock_status, resource_bit); return (-1); } @@ -1147,7 +1002,8 @@ bxe_acquire_hw_lock(struct bxe_softc *sc, DELAY(5000); } - BLOGE(sc, "Resource lock timeout!\n"); + BLOGE(sc, "Resource 0x%x resource_bit 0x%x lock timeout!\n", + resource, resource_bit); return (-1); } @@ -1162,7 +1018,8 @@ bxe_release_hw_lock(struct bxe_softc *sc, /* validate the resource is within range */ if (resource > HW_LOCK_MAX_RESOURCE_VALUE) { - BLOGE(sc, "resource 0x%x > 
HW_LOCK_MAX_RESOURCE_VALUE\n", resource); + BLOGE(sc, "(resource 0x%x > HW_LOCK_MAX_RESOURCE_VALUE)" + " resource_bit 0x%x\n", resource, resource_bit); return (-1); } @@ -1176,8 +1033,8 @@ bxe_release_hw_lock(struct bxe_softc *sc, /* validate the resource is currently taken */ lock_status = REG_RD(sc, hw_lock_control_reg); if (!(lock_status & resource_bit)) { - BLOGE(sc, "resource not in use (status 0x%x bit 0x%x)\n", - lock_status, resource_bit); + BLOGE(sc, "resource (0x%x) not in use (status 0x%x bit 0x%x)\n", + resource, lock_status, resource_bit); return (-1); } @@ -1239,7 +1096,9 @@ bxe_acquire_nvram_lock(struct bxe_softc *sc) } if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) { - BLOGE(sc, "Cannot get access to nvram interface\n"); + BLOGE(sc, "Cannot get access to nvram interface " + "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", + port, val); return (-1); } @@ -1273,7 +1132,9 @@ bxe_release_nvram_lock(struct bxe_softc *sc) } if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) { - BLOGE(sc, "Cannot free access to nvram interface\n"); + BLOGE(sc, "Cannot free access to nvram interface " + "port %d val 0x%x (MCPR_NVM_SW_ARB_ARB_ARB1 << port)\n", + port, val); return (-1); } @@ -1356,7 +1217,9 @@ bxe_nvram_read_dword(struct bxe_softc *sc, } if (rc == -1) { - BLOGE(sc, "nvram read timeout expired\n"); + BLOGE(sc, "nvram read timeout expired " + "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", + offset, cmd_flags, val); } return (rc); @@ -1462,7 +1325,9 @@ bxe_nvram_write_dword(struct bxe_softc *sc, } if (rc == -1) { - BLOGE(sc, "nvram write timeout expired\n"); + BLOGE(sc, "nvram write timeout expired " + "(offset 0x%x cmd_flags 0x%x val 0x%x)\n", + offset, cmd_flags, val); } return (rc); @@ -1696,7 +1561,8 @@ bxe_issue_dmae_with_comp(struct bxe_softc *sc, if (!timeout || (sc->recovery_state != BXE_RECOVERY_DONE && sc->recovery_state != BXE_RECOVERY_NIC_LOADING)) { - BLOGE(sc, "DMAE timeout!\n"); + BLOGE(sc, "DMAE timeout! 
*wb_comp 0x%x recovery_state 0x%x\n", + *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_TIMEOUT); } @@ -1706,7 +1572,8 @@ bxe_issue_dmae_with_comp(struct bxe_softc *sc, } if (*wb_comp & DMAE_PCI_ERR_FLAG) { - BLOGE(sc, "DMAE PCI error!\n"); + BLOGE(sc, "DMAE PCI error! *wb_comp 0x%x recovery_state 0x%x\n", + *wb_comp, sc->recovery_state); BXE_DMAE_UNLOCK(sc); return (DMAE_PCI_ERROR); } @@ -1941,12 +1808,6 @@ elink_cb_event_log(struct bxe_softc *sc, ...) { /* XXX */ -#if 0 - //va_list ap; - va_start(ap, elink_log_id); - _XXX_(sc, lm_log_id, ap); - va_end(ap); -#endif BLOGI(sc, "ELINK EVENT LOG (%d)\n", elink_log_id); } @@ -1959,7 +1820,7 @@ bxe_set_spio(struct bxe_softc *sc, /* Only 2 SPIOs are configurable */ if ((spio != MISC_SPIO_SPIO4) && (spio != MISC_SPIO_SPIO5)) { - BLOGE(sc, "Invalid SPIO 0x%x\n", spio); + BLOGE(sc, "Invalid SPIO 0x%x mode 0x%x\n", spio, mode); return (-1); } @@ -2013,7 +1874,9 @@ bxe_gpio_read(struct bxe_softc *sc, uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { - BLOGE(sc, "Invalid GPIO %d\n", gpio_num); + BLOGE(sc, "Invalid GPIO %d port 0x%x gpio_port %d gpio_shift %d" + " gpio_mask 0x%x\n", gpio_num, port, gpio_port, gpio_shift, + gpio_mask); return (-1); } @@ -2039,7 +1902,9 @@ bxe_gpio_write(struct bxe_softc *sc, uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { - BLOGE(sc, "Invalid GPIO %d\n", gpio_num); + BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" + " gpio_shift %d gpio_mask 0x%x\n", + gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } @@ -2122,7 +1987,8 @@ bxe_gpio_mult_write(struct bxe_softc *sc, break; default: - BLOGE(sc, "Invalid GPIO mode assignment %d\n", mode); + BLOGE(sc, "Invalid GPIO mode assignment pins 0x%x mode 0x%x" + " gpio_reg 0x%x\n", pins, mode, gpio_reg); bxe_release_hw_lock(sc, HW_LOCK_RESOURCE_GPIO); return (-1); } @@ -2148,7 +2014,9 @@ bxe_gpio_int_write(struct bxe_softc *sc, uint32_t gpio_reg; if (gpio_num > MISC_REGISTERS_GPIO_3) { - 
BLOGE(sc, "Invalid GPIO %d\n", gpio_num); + BLOGE(sc, "Invalid GPIO %d mode 0x%x port 0x%x gpio_port %d" + " gpio_shift %d gpio_mask 0x%x\n", + gpio_num, mode, port, gpio_port, gpio_shift, gpio_mask); return (-1); } @@ -2567,29 +2435,6 @@ bxe_sp_post(struct bxe_softc *sc, * @sc: driver hanlde * @p: pointer to rss configuration */ -#if 0 -static void -bxe_debug_print_ind_table(struct bxe_softc *sc, - struct ecore_config_rss_params *p) -{ - int i; - - BLOGD(sc, DBG_LOAD, "Setting indirection table to:\n"); - BLOGD(sc, DBG_LOAD, " 0x0000: "); - for (i = 0; i < T_ETH_INDIRECTION_TABLE_SIZE; i++) { - BLOGD(sc, DBG_LOAD, "0x%02x ", p->ind_table[i]); - - /* Print 4 bytes in a line */ - if ((i + 1 < T_ETH_INDIRECTION_TABLE_SIZE) && - (((i + 1) & 0x3) == 0)) { - BLOGD(sc, DBG_LOAD, "\n"); - BLOGD(sc, DBG_LOAD, "0x%04x: ", i + 1); - } - } - - BLOGD(sc, DBG_LOAD, "\n"); -} -#endif /* * FreeBSD Device probe function. @@ -2764,13 +2609,6 @@ bxe_tx_avail(struct bxe_softc *sc, used = SUB_S16(prod, cons); -#if 0 - KASSERT((used < 0), ("used tx bds < 0")); - KASSERT((used > sc->tx_ring_size), ("used tx bds > tx_ring_size")); - KASSERT(((sc->tx_ring_size - used) > MAX_TX_AVAIL), - ("invalid number of tx bds used")); -#endif - return (int16_t)(sc->tx_ring_size) - used; } @@ -2816,16 +2654,6 @@ bxe_sp_event(struct bxe_softc *sc, BLOGD(sc, DBG_SP, "fp=%d cid=%d got ramrod #%d state is %x type is %d\n", fp->index, cid, command, sc->state, rr_cqe->ramrod_cqe.ramrod_type); -#if 0 - /* - * If cid is within VF range, replace the slowpath object with the - * one corresponding to this VF - */ - if ((cid >= BXE_FIRST_VF_CID) && (cid < BXE_FIRST_VF_CID + BXE_VF_CIDS)) { - bxe_iov_set_queue_sp_obj(sc, cid, &q_obj); - } -#endif - switch (command) { case (RAMROD_CMD_ID_ETH_CLIENT_UPDATE): BLOGD(sc, DBG_SP, "got UPDATE ramrod. 
CID %d\n", cid); @@ -2877,34 +2705,10 @@ bxe_sp_event(struct bxe_softc *sc, return; } -#if 0 - /* SRIOV: reschedule any 'in_progress' operations */ - bxe_iov_sp_event(sc, cid, TRUE); -#endif - atomic_add_acq_long(&sc->cq_spq_left, 1); BLOGD(sc, DBG_SP, "sc->cq_spq_left 0x%lx\n", atomic_load_acq_long(&sc->cq_spq_left)); - -#if 0 - if ((drv_cmd == ECORE_Q_CMD_UPDATE) && (IS_FCOE_FP(fp)) && - (!!bxe_test_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state))) { - /* - * If Queue update ramrod is completed for last Queue in AFEX VIF set - * flow, then ACK MCP at the end. Mark pending ACK to MCP bit to - * prevent case that both bits are cleared. At the end of load/unload - * driver checks that sp_state is cleared and this order prevents - * races. - */ - bxe_set_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, &sc->sp_state); - wmb(); - bxe_clear_bit(ECORE_AFEX_FCOE_Q_UPDATE_PENDING, &sc->sp_state); - - /* schedule the sp task as MCP ack is required */ - bxe_schedule_sp_task(sc); - } -#endif } /* @@ -2945,8 +2749,15 @@ bxe_tpa_start(struct bxe_softc *sc, tmp_bd = tpa_info->bd; if (tmp_bd.m == NULL) { - BLOGE(sc, "fp[%02d].tpa[%02d] mbuf not allocated!\n", - fp->index, queue); + uint32_t *tmp; + + tmp = (uint32_t *)cqe; + + BLOGE(sc, "fp[%02d].tpa[%02d] cons[%d] prod[%d]mbuf not allocated!\n", + fp->index, queue, cons, prod); + BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", + *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); + /* XXX Error handling? */ return; } @@ -3027,10 +2838,17 @@ bxe_fill_frag_mbuf(struct bxe_softc *sc, /* make sure the aggregated frame is not too big to handle */ if (pages > 8 * PAGES_PER_SGE) { + + uint32_t *tmp = (uint32_t *)cqe; + BLOGE(sc, "fp[%02d].sge[0x%04x] has too many pages (%d)! 
" "pkt_len=%d len_on_bd=%d frag_size=%d\n", fp->index, cqe_idx, pages, le16toh(cqe->pkt_len), tpa_info->len_on_bd, frag_size); + + BLOGE(sc, "cqe [0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x 0x%08x]\n", + *tmp, *(tmp+1), *(tmp+2), *(tmp+3), *(tmp+4), *(tmp+5), *(tmp+6), *(tmp+7)); + bxe_panic(sc, ("sge page count error\n")); return (EINVAL); } @@ -3391,15 +3209,6 @@ bxe_rxeof(struct bxe_softc *sc, uint16_t frag_size, pages; uint8_t queue; -#if 0 - /* sanity check */ - if (!fp->tpa_enable && - (CQE_TYPE_START(cqe_fp_type) || CQE_TYPE_STOP(cqe_fp_type))) { - BLOGE(sc, "START/STOP packet while !tpa_enable type (0x%x)\n", - CQE_TYPE(cqe_fp_type)); - } -#endif - if (CQE_TYPE_START(cqe_fp_type)) { bxe_tpa_start(sc, fp, cqe_fp->queue_index, bd_cons, bd_prod, cqe_fp); @@ -3605,44 +3414,8 @@ bxe_free_tx_pkt(struct bxe_softc *sc, tx_start_bd = &fp->tx_chain[bd_idx].start_bd; nbd = le16toh(tx_start_bd->nbd) - 1; -#if 0 - if ((nbd - 1) > (MAX_MBUF_FRAGS + 2)) { - bxe_panic(sc, ("BAD nbd!\n")); - } -#endif - new_cons = (tx_buf->first_bd + nbd); -#if 0 - struct eth_tx_bd *tx_data_bd; - - /* - * The following code doesn't do anything but is left here - * for clarity on what the new value of new_cons skipped. 
- */ - - /* get the next bd */ - bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); - - /* skip the parse bd */ - --nbd; - bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); - - /* skip the TSO split header bd since they have no mapping */ - if (tx_buf->flags & BXE_TSO_SPLIT_BD) { - --nbd; - bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); - } - - /* now free frags */ - while (nbd > 0) { - tx_data_bd = &fp->tx_chain[bd_idx].reg_bd; - if (--nbd) { - bd_idx = TX_BD(TX_BD_NEXT(bd_idx)); - } - } -#endif - /* free the mbuf */ if (__predict_true(tx_buf->m != NULL)) { m_freem(tx_buf->m); @@ -3787,7 +3560,8 @@ bxe_del_all_macs(struct bxe_softc *sc, rc = mac_obj->delete_all(sc, mac_obj, &vlan_mac_flags, &ramrod_flags); if (rc < 0) { - BLOGE(sc, "Failed to delete MACs (%d)\n", rc); + BLOGE(sc, "Failed to delete MACs (%d) mac_type %d wait_for_comp 0x%x\n", + rc, mac_type, wait_for_comp); } return (rc); @@ -3859,7 +3633,7 @@ bxe_fill_accept_flags(struct bxe_softc *sc, break; default: - BLOGE(sc, "Unknown rx_mode (%d)\n", rx_mode); + BLOGE(sc, "Unknown rx_mode (0x%x)\n", rx_mode); return (-1); } @@ -3907,7 +3681,11 @@ bxe_set_q_rx_mode(struct bxe_softc *sc, rc = ecore_config_rx_mode(sc, &ramrod_param); if (rc < 0) { - BLOGE(sc, "Set rx_mode %d failed\n", sc->rx_mode); + BLOGE(sc, "Set rx_mode %d cli_id 0x%x rx_mode_flags 0x%x " + "rx_accept_flags 0x%x tx_accept_flags 0x%x " + "ramrod_flags 0x%x rc %d failed\n", sc->rx_mode, cl_id, + (uint32_t)rx_mode_flags, (uint32_t)rx_accept_flags, + (uint32_t)tx_accept_flags, (uint32_t)ramrod_flags, rc); return (rc); } @@ -3990,52 +3768,11 @@ bxe_send_unload_req(struct bxe_softc *sc, int unload_mode) { uint32_t reset_code = 0; -#if 0 - int port = SC_PORT(sc); - int path = SC_PATH(sc); -#endif /* Select the UNLOAD request mode */ if (unload_mode == UNLOAD_NORMAL) { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; - } -#if 0 - else if (sc->flags & BXE_NO_WOL_FLAG) { - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP; - } else if (sc->wol) { - uint32_t emac_base = port ? 
GRCBASE_EMAC1 : GRCBASE_EMAC0; - uint8_t *mac_addr = sc->dev->dev_addr; - uint32_t val; - uint16_t pmc; - - /* - * The mac address is written to entries 1-4 to - * preserve entry 0 which is used by the PMF - */ - uint8_t entry = (SC_VN(sc) + 1)*8; - - val = (mac_addr[0] << 8) | mac_addr[1]; - EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry, val); - - val = (mac_addr[2] << 24) | (mac_addr[3] << 16) | - (mac_addr[4] << 8) | mac_addr[5]; - EMAC_WR(sc, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val); - - /* Enable the PME and clear the status */ - pmc = pci_read_config(sc->dev, - (sc->devinfo.pcie_pm_cap_reg + - PCIR_POWER_STATUS), - 2); - pmc |= PCIM_PSTAT_PMEENABLE | PCIM_PSTAT_PME; - pci_write_config(sc->dev, - (sc->devinfo.pcie_pm_cap_reg + - PCIR_POWER_STATUS), - pmc, 4); - - reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN; - } -#endif - else { + } else { reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; } @@ -4210,7 +3947,7 @@ bxe_func_stop(struct bxe_softc *sc) rc = ecore_func_state_change(sc, &func_params); if (rc) { BLOGE(sc, "FUNC_STOP ramrod failed. " - "Running a dry transaction\n"); + "Running a dry transaction (%d)\n", rc); bxe_set_bit(RAMROD_DRV_CLR_ONLY, &func_params.ramrod_flags); return (ecore_func_state_change(sc, &func_params)); } @@ -4321,7 +4058,7 @@ bxe_chip_cleanup(struct bxe_softc *sc, */ rc = bxe_func_wait_started(sc); if (rc) { - BLOGE(sc, "bxe_func_wait_started failed\n"); + BLOGE(sc, "bxe_func_wait_started failed (%d)\n", rc); } /* @@ -4339,14 +4076,14 @@ bxe_chip_cleanup(struct bxe_softc *sc, * very wrong has happen. 
*/ if (!bxe_wait_sp_comp(sc, ~0x0UL)) { - BLOGE(sc, "Common slow path ramrods got stuck!\n"); + BLOGE(sc, "Common slow path ramrods got stuck!(%d)\n", rc); } unload_error: rc = bxe_func_stop(sc); if (rc) { - BLOGE(sc, "Function stop failed!\n"); + BLOGE(sc, "Function stop failed!(%d)\n", rc); } /* disable HW interrupts */ @@ -4358,7 +4095,7 @@ unload_error: /* Reset the chip */ rc = bxe_reset_hw(sc, reset_code); if (rc) { - BLOGE(sc, "Hardware reset failed\n"); + BLOGE(sc, "Hardware reset failed(%d)\n", rc); } /* Report UNLOAD_DONE to MCP */ @@ -4484,7 +4221,8 @@ bxe_nic_unload(struct bxe_softc *sc, mb(); BLOGD(sc, DBG_LOAD, "Releasing a leadership...\n"); - BLOGE(sc, "Can't unload in closed or error state\n"); + BLOGE(sc, "Can't unload in closed or error state recover_state 0x%x" + " state = 0x%x\n", sc->recovery_state, sc->state); return (-1); } @@ -4691,7 +4429,8 @@ bxe_ioctl_nvram(struct bxe_softc *sc, if ((nvdata = (struct bxe_nvram_data *) malloc(len, M_DEVBUF, (M_NOWAIT | M_ZERO))) == NULL) { - BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed\n"); + BLOGE(sc, "BXE_IOC_RD_NVRAM malloc failed priv_op 0x%x " + " len = 0x%x\n", priv_op, len); return (1); } memcpy(nvdata, &nvdata_base, sizeof(struct bxe_nvram_data)); @@ -5387,11 +5126,6 @@ bxe_set_pbd_lso_e2(struct mbuf *m, ETH_TX_PARSE_BD_E2_LSO_MSS); /* XXX test for IPv6 with extension header... */ -#if 0 - struct ip6_hdr *ip6; - if (ip6 && ip6->ip6_nxt == 'some ipv6 extension header') - *parsing_data |= ETH_TX_PARSE_BD_E2_IPV6_WITH_EXT_HDR; -#endif } static void @@ -5652,17 +5386,6 @@ bxe_tx_encap_continue: } else { /* used by FW for packet accounting */ tx_start_bd->vlan_or_ethertype = htole16(fp->tx_pkt_prod); -#if 0 - /* - * If NPAR-SD is active then FW should do the tagging regardless - * of value of priority. Otherwise, if priority indicates this is - * a control packet we need to indicate to FW to avoid tagging. 
- */ - if (!IS_MF_AFEX(sc) && (mbuf priority == PRIO_CONTROL)) { - SET_FLAG(tx_start_bd->general_data, - ETH_TX_START_BD_FORCE_VLAN_MODE, 1); - } -#endif } } @@ -5702,25 +5425,6 @@ bxe_tx_encap_continue: hlen = bxe_set_pbd_csum_e2(fp, m0, &pbd_e2_parsing_data); } -#if 0 - /* - * Add the MACs to the parsing BD if the module param was - * explicitly set, if this is a vf, or in switch independent - * mode. - */ - if (sc->flags & BXE_TX_SWITCHING || IS_VF(sc) || IS_MF_SI(sc)) { - eh = mtod(m0, struct ether_vlan_header *); - bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.src_hi, - &pbd_e2->data.mac_addr.src_mid, - &pbd_e2->data.mac_addr.src_lo, - eh->evl_shost); - bxe_set_fw_mac_addr(&pbd_e2->data.mac_addr.dst_hi, - &pbd_e2->data.mac_addr.dst_mid, - &pbd_e2->data.mac_addr.dst_lo, - eh->evl_dhost); - } -#endif - SET_FLAG(pbd_e2_parsing_data, ETH_TX_PARSE_BD_E2_ETH_ADDR_TYPE, mac_type); } else { @@ -6347,13 +6051,6 @@ bxe_free_mem(struct bxe_softc *sc) { int i; -#if 0 - if (!CONFIGURE_NIC_MODE(sc)) { - /* free searcher T2 table */ - bxe_dma_free(sc, &sc->t2); - } -#endif - for (i = 0; i < L2_ILT_LINES(sc); i++) { bxe_dma_free(sc, &sc->context[i].vcxt_dma); sc->context[i].vcxt = NULL; @@ -6364,9 +6061,6 @@ bxe_free_mem(struct bxe_softc *sc) bxe_free_ilt_lines_mem(sc); -#if 0 - bxe_iov_free_mem(sc); -#endif } static int @@ -6376,16 +6070,6 @@ bxe_alloc_mem(struct bxe_softc *sc) int allocated; int i; -#if 0 - if (!CONFIGURE_NIC_MODE(sc)) { - /* allocate searcher T2 table */ - if (bxe_dma_alloc(sc, SRC_T2_SZ, - &sc->t2, "searcher t2 table") != 0) { - return (-1); - } - } -#endif - /* * Allocate memory for CDU context: * This memory is allocated separately and not in the generic ILT @@ -6440,14 +6124,6 @@ bxe_alloc_mem(struct bxe_softc *sc) return (-1); } -#if 0 - if (bxe_iov_alloc_mem(sc)) { - BLOGE(sc, "Failed to allocate memory for SRIOV\n"); - bxe_free_mem(sc); - return (-1); - } -#endif - return (0); } @@ -8325,27 +8001,9 @@ bxe_attn_int_deasserted3(struct bxe_softc *sc, if 
(val & DRV_STATUS_DRV_INFO_REQ) bxe_handle_drv_info_req(sc); -#if 0 - if (val & DRV_STATUS_VF_DISABLED) - bxe_vf_handle_flr_event(sc); -#endif - if ((sc->port.pmf == 0) && (val & DRV_STATUS_PMF)) bxe_pmf_update(sc); -#if 0 - if (sc->port.pmf && - (val & DRV_STATUS_DCBX_NEGOTIATION_RESULTS) && - (sc->dcbx_enabled > 0)) - /* start dcbx state machine */ - bxe_dcbx_set_params(sc, BXE_DCBX_STATE_NEG_RECEIVED); -#endif - -#if 0 - if (val & DRV_STATUS_AFEX_EVENT_MASK) - bxe_handle_afex_cmd(sc, val & DRV_STATUS_AFEX_EVENT_MASK); -#endif - if (val & DRV_STATUS_EEE_NEGOTIATION_RESULTS) bxe_handle_eee_event(sc); @@ -8746,8 +8404,7 @@ bxe_handle_mcast_eqe(struct bxe_softc *sc) rc = ecore_config_mcast(sc, &rparam, ECORE_MCAST_CMD_CONT); if (rc < 0) { BLOGD(sc, DBG_SP, - "ERROR: Failed to send pending mcast commands (%d)\n", - rc); + "ERROR: Failed to send pending mcast commands (%d)\n", rc); } } @@ -8807,16 +8464,6 @@ bxe_handle_rx_mode_eqe(struct bxe_softc *sc, &sc->sp_state)) { bxe_set_storm_rx_mode(sc); } -#if 0 - else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_START_SCHED, - &sc->sp_state)) { - bxe_set_iscsi_eth_rx_mode(sc, TRUE); - } - else if (bxe_test_and_clear_bit(ECORE_FILTER_ISCSI_ETH_STOP_SCHED, - &sc->sp_state)) { - bxe_set_iscsi_eth_rx_mode(sc, FALSE); - } -#endif } static void @@ -8868,27 +8515,12 @@ bxe_eq_int(struct bxe_softc *sc) elem = &sc->eq[EQ_DESC(sw_cons)]; -#if 0 - int rc; - rc = bxe_iov_eq_sp_event(sc, elem); - if (!rc) { - BLOGE(sc, "bxe_iov_eq_sp_event returned %d\n", rc); - goto next_spqe; - } -#endif - /* elem CID originates from FW, actually LE */ cid = SW_CID(elem->message.data.cfc_del_event.cid); opcode = elem->message.opcode; /* handle eq element */ switch (opcode) { -#if 0 - case EVENT_RING_OPCODE_VF_PF_CHANNEL: - BLOGD(sc, DBG_SP, "vf/pf channel element on eq\n"); - bxe_vf_mbx(sc, &elem->message.data.vf_pf_event); - continue; -#endif case EVENT_RING_OPCODE_STAT_QUERY: BLOGD(sc, DBG_SP, "got statistics completion event %d\n", @@ -8934,25 
+8566,9 @@ bxe_eq_int(struct bxe_softc *sc) else { BLOGD(sc, DBG_SP, "AFEX: ramrod completed FUNCTION_UPDATE\n"); -#if 0 - f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_UPDATE); - /* - * We will perform the queues update from the sp_core_task as - * all queue SP operations should run with CORE_LOCK. - */ - bxe_set_bit(BXE_SP_CORE_AFEX_F_UPDATE, &sc->sp_core_state); - taskqueue_enqueue(sc->sp_tq, &sc->sp_tq_task); -#endif } goto next_spqe; -#if 0 - case EVENT_RING_OPCODE_AFEX_VIF_LISTS: - f_obj->complete_cmd(sc, f_obj, ECORE_F_CMD_AFEX_VIFLISTS); - bxe_after_afex_vif_lists(sc, elem); - goto next_spqe; -#endif - case EVENT_RING_OPCODE_FORWARD_SETUP: q_obj = &bxe_fwd_sp_obj(sc, q_obj); if (q_obj->complete_cmd(sc, q_obj, @@ -9079,14 +8695,6 @@ bxe_handle_sp_tq(void *context, */ // XXX bxe_iov_sp_task(sc); -#if 0 - /* AFEX - poll to check if VIFSET_ACK should be sent to MFW */ - if (bxe_test_and_clear_bit(ECORE_AFEX_PENDING_VIFSET_MCP_ACK, - &sc->sp_state)) { - bxe_link_report(sc); - bxe_fw_command(sc, DRV_MSG_CODE_AFEX_VIFSET_ACK, 0); - } -#endif } static void @@ -9195,13 +8803,6 @@ bxe_intr_legacy(void *xsc) BLOGD(sc, DBG_INTR, "---> BXE INTx <---\n"); -#if 0 - /* Don't handle any interrupts if we're not ready. */ - if (__predict_false(sc->intr_sem != 0)) { - return; - } -#endif - /* * 0 for ustorm, 1 for cstorm * the bits returned from ack_int() are 0-15 @@ -9232,16 +8833,6 @@ bxe_intr_legacy(void *xsc) } } -#if 0 - if (CNIC_SUPPORT(sc)) { - mask = 0x2; - if (status & (mask | 0x1)) { - ... - status &= ~mask; - } - } -#endif - if (__predict_false(status & 0x1)) { /* acknowledge and disable further slowpath interrupts */ bxe_ack_sb(sc, sc->igu_dsb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); @@ -9285,13 +8876,6 @@ bxe_intr_fp(void *xfp) "(cpu=%d) MSI-X fp=%d fw_sb=%d igu_sb=%d\n", curcpu, fp->index, fp->fw_sb_id, fp->igu_sb_id); -#if 0 - /* Don't handle any interrupts if we're not ready. 
*/ - if (__predict_false(sc->intr_sem != 0)) { - return; - } -#endif - /* acknowledge and disable further fastpath interrupts */ bxe_ack_sb(sc, fp->igu_sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0); @@ -10010,13 +9594,6 @@ bxe_rx_ustorm_prods_offset(struct bxe_softc *sc, { uint32_t offset = BAR_USTRORM_INTMEM; -#if 0 - if (IS_VF(sc)) { - return (PXP_VF_ADDR_USDM_QUEUES_START + - (sc->acquire_resp.resc.hw_qid[fp->index] * - sizeof(struct ustorm_queue_zone_data))); - } else -#endif if (!CHIP_IS_E1x(sc)) { offset += USTORM_RX_PRODS_E2_OFFSET(fp->cl_qzone_id); } else { @@ -10215,14 +9792,7 @@ bxe_init_tx_rings(struct bxe_softc *sc) int i; for (i = 0; i < sc->num_queues; i++) { -#if 0 - uint8_t cos; - for (cos = 0; cos < sc->max_cos; cos++) { - bxe_init_tx_ring_one(&sc->fp[i].txdata[cos]); - } -#else bxe_init_tx_ring_one(&sc->fp[i]); -#endif } } @@ -11091,7 +10661,8 @@ bxe_set_power_state(struct bxe_softc *sc, break; default: - BLOGE(sc, "Can't support PCI power state = %d\n", state); + BLOGE(sc, "Can't support PCI power state = 0x%x pmcsr 0x%x\n", + state, pmcsr); return (-1); } @@ -11132,7 +10703,9 @@ bxe_trylock_hw_lock(struct bxe_softc *sc, return (TRUE); } - BLOGE(sc, "Failed to get a resource lock 0x%x\n", resource); + BLOGE(sc, "Failed to get a resource lock 0x%x func %d " + "lock_status 0x%x resource_bit 0x%x\n", resource, func, + lock_status, resource_bit); return (FALSE); } @@ -11720,10 +11293,6 @@ bxe_get_q_flags(struct bxe_softc *sc, if (if_getcapenable(sc->ifp) & IFCAP_LRO) { bxe_set_bit(ECORE_Q_FLG_TPA, &flags); bxe_set_bit(ECORE_Q_FLG_TPA_IPV6, &flags); -#if 0 - if (fp->mode == TPA_MODE_GRO) - __set_bit(ECORE_Q_FLG_TPA_GRO, &flags); -#endif } if (leading) { @@ -11733,13 +11302,6 @@ bxe_get_q_flags(struct bxe_softc *sc, bxe_set_bit(ECORE_Q_FLG_VLAN, &flags); -#if 0 - /* configure silent vlan removal */ - if (IS_MF_AFEX(sc)) { - bxe_set_bit(ECORE_Q_FLG_SILENT_VLAN_REM, &flags); - } -#endif - /* merge with common flags */ return (flags | bxe_get_common_flags(sc, 
fp, TRUE)); } @@ -11890,11 +11452,6 @@ bxe_setup_queue(struct bxe_softc *sc, struct ecore_queue_state_params q_params = { NULL }; struct ecore_queue_setup_params *setup_params = &q_params.params.setup; -#if 0 - struct ecore_queue_setup_tx_only_params *tx_only_params = - &q_params.params.tx_only; - uint8_t tx_index; -#endif int rc; BLOGD(sc, DBG_LOAD, "setting up queue %d\n", fp->index); @@ -11915,7 +11472,7 @@ bxe_setup_queue(struct bxe_softc *sc, /* Change the state to INIT */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { - BLOGE(sc, "Queue(%d) INIT failed\n", fp->index); + BLOGE(sc, "Queue(%d) INIT failed rc = %d\n", fp->index, rc); return (rc); } @@ -11945,26 +11502,10 @@ bxe_setup_queue(struct bxe_softc *sc, /* change the state to SETUP */ rc = ecore_queue_state_change(sc, &q_params); if (rc) { - BLOGE(sc, "Queue(%d) SETUP failed\n", fp->index); + BLOGE(sc, "Queue(%d) SETUP failed (rc = %d)\n", fp->index, rc); return (rc); } -#if 0 - /* loop through the relevant tx-only indices */ - for (tx_index = FIRST_TX_ONLY_COS_INDEX; - tx_index < sc->max_cos; - tx_index++) { - /* prepare and send tx-only ramrod*/ - rc = bxe_setup_tx_only(sc, fp, &q_params, - tx_only_params, tx_index, leading); - if (rc) { - BLOGE(sc, "Queue(%d.%d) TX_ONLY_SETUP failed\n", - fp->index, tx_index); - return (rc); - } - } -#endif - return (rc); } @@ -12115,27 +11656,6 @@ bxe_set_eth_mac(struct bxe_softc *sc, set, ECORE_ETH_MAC, &ramrod_flags)); } -#if 0 -static void -bxe_update_max_mf_config(struct bxe_softc *sc, - uint32_t value) -{ - /* load old values */ - uint32_t mf_cfg = sc->devinfo.mf_info.mf_config[SC_VN(sc)]; - - if (value != bxe_extract_max_cfg(sc, mf_cfg)) { - /* leave all but MAX value */ - mf_cfg &= ~FUNC_MF_CFG_MAX_BW_MASK; - - /* set new MAX value */ - mf_cfg |= ((value << FUNC_MF_CFG_MAX_BW_SHIFT) & - FUNC_MF_CFG_MAX_BW_MASK); - - bxe_fw_command(sc, DRV_MSG_CODE_SET_MF_BW, mf_cfg); - } -} -#endif - static int bxe_get_cur_phy_idx(struct bxe_softc *sc) { @@ -12375,12 
+11895,6 @@ bxe_link_status_update(struct bxe_softc *sc) return; } -#if 0 - /* read updated dcb configuration */ - if (IS_PF(sc)) - bxe_dcbx_pmf_update(sc); -#endif - if (IS_PF(sc) && !CHIP_REV_IS_SLOW(sc)) { elink_link_status_update(&sc->link_params, &sc->link_vars); } else { @@ -12696,18 +12210,6 @@ bxe_set_rx_mode(struct bxe_softc *sc) rx_mode = BXE_RX_MODE_PROMISC; } } -#if 0 - else { - /* - * Configuring mcast to a VF involves sleeping (when we - * wait for the PF's response). Since this function is - * called from a non sleepable context we must schedule - * a work item for this purpose - */ - bxe_set_bit(BXE_SP_RTNL_VFPF_MCAST, &sc->sp_rtnl_state); - schedule_delayed_work(&sc->sp_rtnl_task, 0); - } -#endif } sc->rx_mode = rx_mode; @@ -12722,19 +12224,6 @@ bxe_set_rx_mode(struct bxe_softc *sc) if (IS_PF(sc)) { bxe_set_storm_rx_mode(sc); } -#if 0 - else { - /* - * Configuring mcast to a VF involves sleeping (when we - * wait for the PF's response). Since this function is - * called from a non sleepable context we must schedule - * a work item for this purpose - */ - bxe_set_bit(BXE_SP_RTNL_VFPF_STORM_RX_MODE, &sc->sp_rtnl_state); - schedule_delayed_work(&sc->sp_rtnl_task, 0); - } -#endif - } @@ -12842,13 +12331,6 @@ bxe_periodic_callout_func(void *xsc) /* state is BXE_STATE_OPEN */ bxe_stats_handle(sc, STATS_EVENT_UPDATE); -#if 0 - /* sample VF bulletin board for new posts from PF */ - if (IS_VF(sc)) { - bxe_sample_bulletin(sc); - } -#endif - BXE_CORE_UNLOCK(sc); if ((sc->state == BXE_STATE_OPEN) && @@ -12983,7 +12465,7 @@ bxe_nic_load(struct bxe_softc *sc, sc->state = BXE_STATE_OPENING_WAITING_PORT; rc = bxe_func_start(sc); if (rc) { - BLOGE(sc, "Function start failed!\n"); + BLOGE(sc, "Function start failed! 
rc = %d\n", rc); bxe_fw_command(sc, DRV_MSG_CODE_LOAD_DONE, 0); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; @@ -13002,7 +12484,7 @@ bxe_nic_load(struct bxe_softc *sc, rc = bxe_setup_leading(sc); if (rc) { - BLOGE(sc, "Setup leading failed!\n"); + BLOGE(sc, "Setup leading failed! rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } @@ -13010,7 +12492,7 @@ bxe_nic_load(struct bxe_softc *sc, FOR_EACH_NONDEFAULT_ETH_QUEUE(sc, i) { rc = bxe_setup_queue(sc, &sc->fp[i], FALSE); if (rc) { - BLOGE(sc, "Queue(%d) setup failed\n", i); + BLOGE(sc, "Queue(%d) setup failed rc = %d\n", i, rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } @@ -13024,18 +12506,6 @@ bxe_nic_load(struct bxe_softc *sc, } } /* XXX VF */ -#if 0 - else { /* VF */ - FOR_EACH_ETH_QUEUE(sc, i) { - rc = bxe_vfpf_setup_q(sc, i); - if (rc) { - BLOGE(sc, "Queue(%d) setup failed\n", i); - sc->state = BXE_STATE_ERROR; - goto bxe_nic_load_error3; - } - } - } -#endif /* now when Clients are configured we are ready to work */ sc->state = BXE_STATE_OPEN; @@ -13044,25 +12514,12 @@ bxe_nic_load(struct bxe_softc *sc, if (IS_PF(sc)) { rc = bxe_set_eth_mac(sc, TRUE); } -#if 0 - else { /* IS_VF(sc) */ - rc = bxe_vfpf_set_mac(sc); - } -#endif if (rc) { - BLOGE(sc, "Setting Ethernet MAC failed\n"); + BLOGE(sc, "Setting Ethernet MAC failed rc = %d\n", rc); sc->state = BXE_STATE_ERROR; goto bxe_nic_load_error3; } -#if 0 - if (IS_PF(sc) && sc->pending_max) { - /* for AFEX */ - bxe_update_max_mf_config(sc, sc->pending_max); - sc->pending_max = 0; - } -#endif - if (sc->port.pmf) { rc = bxe_initial_phy_init(sc, /* XXX load_mode */LOAD_OPEN); if (rc) { @@ -13120,13 +12577,6 @@ bxe_nic_load(struct bxe_softc *sc, return (ENXIO); } -#if 0 - /* If PMF - send ADMIN DCBX msg to MFW to initiate DCBX FSM */ - if (sc->port.pmf && (sc->state != BXE_STATE_DIAG)) { - bxe_dcbx_init(sc, FALSE); - } -#endif - /* Tell the stack the driver is running! 
*/ if_setdrvflags(sc->ifp, IFF_DRV_RUNNING); @@ -13402,11 +12852,6 @@ bxe_allocate_bars(struct bxe_softc *sc) SYS_RES_MEMORY, &sc->bar[i].rid, flags)) == NULL) { -#if 0 - /* BAR4 doesn't exist for E1 */ - BLOGE(sc, "PCI BAR%d [%02x] memory allocation failed\n", - i, PCIR_BAR(i)); -#endif return (0); } @@ -14054,19 +13499,6 @@ bxe_get_shmem_info(struct bxe_softc *sc) BLOGD(sc, DBG_LOAD, "Ethernet address: %s\n", sc->mac_addr_str); } -#if 0 - if (!IS_MF(sc) && - ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == - PORT_FEAT_CFG_STORAGE_PERSONALITY_FCOE)) { - sc->flags |= BXE_NO_ISCSI; - } - if (!IS_MF(sc) && - ((sc->port.config & PORT_FEAT_CFG_STORAGE_PERSONALITY_MASK) == - PORT_FEAT_CFG_STORAGE_PERSONALITY_ISCSI)) { - sc->flags |= BXE_NO_FCOE_FLAG; - } -#endif - return (0); } @@ -15239,8 +14671,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " - "'fp %d tx mbufs' (%d)\n", - i, rc); + "'fp %d tx mbufs' (%d)\n", i, rc); return (1); } @@ -15251,8 +14682,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->tx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d tx mbuf %d' (%d)\n", - i, j, rc); + "'fp %d tx mbuf %d' (%d)\n", i, j, rc); return (1); } } @@ -15279,8 +14709,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " - "'fp %d rx mbufs' (%d)\n", - i, rc); + "'fp %d rx mbufs' (%d)\n", i, rc); return (1); } @@ -15291,8 +14720,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->rx_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d rx mbuf %d' (%d)\n", - i, j, rc); + "'fp %d rx mbuf %d' (%d)\n", i, j, rc); return (1); } } @@ -15303,8 +14731,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) 
&fp->rx_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d spare rx mbuf' (%d)\n", - i, rc); + "'fp %d spare rx mbuf' (%d)\n", i, rc); return (1); } @@ -15330,8 +14757,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) if (rc != 0) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma tag for " - "'fp %d rx sge mbufs' (%d)\n", - i, rc); + "'fp %d rx sge mbufs' (%d)\n", i, rc); return (1); } @@ -15342,8 +14768,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->rx_sge_mbuf_chain[j].m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d rx sge mbuf %d' (%d)\n", - i, j, rc); + "'fp %d rx sge mbuf %d' (%d)\n", i, j, rc); return (1); } } @@ -15354,8 +14779,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->rx_sge_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d spare rx sge mbuf' (%d)\n", - i, rc); + "'fp %d spare rx sge mbuf' (%d)\n", i, rc); return (1); } @@ -15372,8 +14796,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->rx_tpa_info[j].bd.m_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d rx tpa mbuf %d' (%d)\n", - i, j, rc); + "'fp %d rx tpa mbuf %d' (%d)\n", i, j, rc); return (1); } } @@ -15384,8 +14807,7 @@ bxe_alloc_hsi_mem(struct bxe_softc *sc) &fp->rx_tpa_info_mbuf_spare_map)) { /* XXX unwind and free previous fastpath allocations */ BLOGE(sc, "Failed to create dma map for " - "'fp %d spare rx tpa mbuf' (%d)\n", - i, rc); + "'fp %d spare rx tpa mbuf' (%d)\n", i, rc); return (1); } @@ -16068,7 +15490,8 @@ bxe_prev_unload(struct bxe_softc *sc) } while (--time_counter); if (!time_counter || rc) { - BLOGE(sc, "Failed to unload previous driver!\n"); + BLOGE(sc, "Failed to unload previous driver!" 
+ " time_counter %d rc %d\n", time_counter, rc); rc = -1; } @@ -16845,88 +16268,12 @@ static void bxe_iov_init_dmae(struct bxe_softc *sc) { return; -#if 0 - BLOGD(sc, DBG_LOAD, "SRIOV is %s\n", IS_SRIOV(sc) ? "ON" : "OFF"); - - if (!IS_SRIOV(sc)) { - return; - } - - REG_WR(sc, DMAE_REG_BACKWARD_COMP_EN, 0); -#endif } -#if 0 -static int -bxe_iov_init_ilt(struct bxe_softc *sc, - uint16_t line) -{ - return (line); -#if 0 - int i; - struct ecore_ilt* ilt = sc->ilt; - - if (!IS_SRIOV(sc)) { - return (line); - } - - /* set vfs ilt lines */ - for (i = 0; i < BXE_VF_CIDS/ILT_PAGE_CIDS ; i++) { - struct hw_dma *hw_cxt = SC_VF_CXT_PAGE(sc,i); - ilt->lines[line+i].page = hw_cxt->addr; - ilt->lines[line+i].page_mapping = hw_cxt->mapping; - ilt->lines[line+i].size = hw_cxt->size; /* doesn't matter */ - } - return (line+i); -#endif -} -#endif - static void bxe_iov_init_dq(struct bxe_softc *sc) { return; -#if 0 - if (!IS_SRIOV(sc)) { - return; - } - - /* Set the DQ such that the CID reflect the abs_vfid */ - REG_WR(sc, DORQ_REG_VF_NORM_VF_BASE, 0); - REG_WR(sc, DORQ_REG_MAX_RVFID_SIZE, ilog2(BNX2X_MAX_NUM_OF_VFS)); - - /* - * Set VFs starting CID. If its > 0 the preceding CIDs are belong to - * the PF L2 queues - */ - REG_WR(sc, DORQ_REG_VF_NORM_CID_BASE, BNX2X_FIRST_VF_CID); - - /* The VF window size is the log2 of the max number of CIDs per VF */ - REG_WR(sc, DORQ_REG_VF_NORM_CID_WND_SIZE, BNX2X_VF_CID_WND); - - /* - * The VF doorbell size 0 - *B, 4 - 128B. We set it here to match - * the Pf doorbell size although the 2 are independent. 
- */ - REG_WR(sc, DORQ_REG_VF_NORM_CID_OFST, - BNX2X_DB_SHIFT - BNX2X_DB_MIN_SHIFT); - - /* - * No security checks for now - - * configure single rule (out of 16) mask = 0x1, value = 0x0, - * CID range 0 - 0x1ffff - */ - REG_WR(sc, DORQ_REG_VF_TYPE_MASK_0, 1); - REG_WR(sc, DORQ_REG_VF_TYPE_VALUE_0, 0); - REG_WR(sc, DORQ_REG_VF_TYPE_MIN_MCID_0, 0); - REG_WR(sc, DORQ_REG_VF_TYPE_MAX_MCID_0, 0x1ffff); - - /* set the number of VF alllowed doorbells to the full DQ range */ - REG_WR(sc, DORQ_REG_VF_NORM_MAX_CID_COUNT, 0x20000); - - /* set the VF doorbell threshold */ - REG_WR(sc, DORQ_REG_VF_USAGE_CT_LIMIT, 4); -#endif } /* send a NIG loopback debug packet */ @@ -17080,7 +16427,7 @@ bxe_int_mem_test(struct bxe_softc *sc) val = REG_RD(sc, NIG_REG_INGRESS_EOP_LB_EMPTY); if (val != 1) { - BLOGE(sc, "clear of NIG failed\n"); + BLOGE(sc, "clear of NIG failed val=0x%x\n", val); return (-4); } @@ -17321,12 +16668,13 @@ bxe_init_hw_common(struct bxe_softc *sc) /* finish PXP init */ val = REG_RD(sc, PXP2_REG_RQ_CFG_DONE); if (val != 1) { - BLOGE(sc, "PXP2 CFG failed\n"); + BLOGE(sc, "PXP2 CFG failed PXP2_REG_RQ_CFG_DONE val = 0x%x\n", + val); return (-1); } val = REG_RD(sc, PXP2_REG_RD_INIT_DONE); if (val != 1) { - BLOGE(sc, "PXP2 RD_INIT failed\n"); + BLOGE(sc, "PXP2 RD_INIT failed val = 0x%x\n", val); return (-1); } @@ -17451,7 +16799,7 @@ bxe_init_hw_common(struct bxe_softc *sc) } while (factor-- && (val != 1)); if (val != 1) { - BLOGE(sc, "ATC_INIT failed\n"); + BLOGE(sc, "ATC_INIT failed val = 0x%x\n", val); return (-1); } } @@ -17660,17 +17008,17 @@ bxe_init_hw_common(struct bxe_softc *sc) /* finish CFC init */ val = reg_poll(sc, CFC_REG_LL_INIT_DONE, 1, 100, 10); if (val != 1) { - BLOGE(sc, "CFC LL_INIT failed\n"); + BLOGE(sc, "CFC LL_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, CFC_REG_AC_INIT_DONE, 1, 100, 10); if (val != 1) { - BLOGE(sc, "CFC AC_INIT failed\n"); + BLOGE(sc, "CFC AC_INIT failed val=0x%x\n", val); return (-1); } val = reg_poll(sc, 
CFC_REG_CAM_INIT_DONE, 1, 100, 10); if (val != 1) { - BLOGE(sc, "CFC CAM_INIT failed\n"); + BLOGE(sc, "CFC CAM_INIT failed val=0x%x\n", val); return (-1); } REG_WR(sc, CFC_REG_DEBUG0, 0); @@ -17682,7 +17030,7 @@ bxe_init_hw_common(struct bxe_softc *sc) /* do internal memory self test */ if ((val == 0) && bxe_int_mem_test(sc)) { - BLOGE(sc, "internal mem self test failed\n"); + BLOGE(sc, "internal mem self test failed val=0x%x\n", val); return (-1); } } @@ -17718,6 +17066,7 @@ bxe_init_hw_common_chip(struct bxe_softc *sc) int rc = bxe_init_hw_common(sc); if (rc) { + BLOGE(sc, "bxe_init_hw_common failed rc=%d\n", rc); return (rc); } @@ -18306,17 +17655,6 @@ bxe_pf_flr_clnup(struct bxe_softc *sc) return (0); } -#if 0 -static void -bxe_init_searcher(struct bxe_softc *sc) -{ - int port = SC_PORT(sc); - ecore_src_init_t2(sc, sc->t2, sc->t2_mapping, SRC_CONN_NUM); - /* T1 hash bits value determines the T1 number of entries */ - REG_WR(sc, SRC_REG_NUMBER_HASH_BITS0 + port*4, SRC_HASH_BITS); -} -#endif - static int bxe_init_hw_func(struct bxe_softc *sc) { @@ -18356,21 +17694,6 @@ bxe_init_hw_func(struct bxe_softc *sc) ilt = sc->ilt; cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; -#if 0 - if (IS_SRIOV(sc)) { - cdu_ilt_start += BXE_FIRST_VF_CID/ILT_PAGE_CIDS; - } - cdu_ilt_start = bxe_iov_init_ilt(sc, cdu_ilt_start); - -#if (BXE_FIRST_VF_CID > 0) - /* - * If BXE_FIRST_VF_CID > 0 then the PF L2 cids precedes - * those of the VFs, so start line should be reset - */ - cdu_ilt_start = ilt->clients[ILT_CLIENT_CDU].start; -#endif -#endif - for (i = 0; i < L2_ILT_LINES(sc); i++) { ilt->lines[cdu_ilt_start + i].page = sc->context[i].vcxt; ilt->lines[cdu_ilt_start + i].page_mapping = @@ -18379,18 +17702,9 @@ bxe_init_hw_func(struct bxe_softc *sc) } ecore_ilt_init_op(sc, INITOP_SET); -#if 0 - if (!CONFIGURE_NIC_MODE(sc)) { - bxe_init_searcher(sc); - REG_WR(sc, PRS_REG_NIC_MODE, 0); - BLOGD(sc, DBG_LOAD, "NIC MODE disabled\n"); - } else -#endif - { - /* Set NIC mode */ - REG_WR(sc, 
PRS_REG_NIC_MODE, 1); - BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); - } + /* Set NIC mode */ + REG_WR(sc, PRS_REG_NIC_MODE, 1); + BLOGD(sc, DBG_LOAD, "NIC MODE configured\n"); if (!CHIP_IS_E1x(sc)) { uint32_t pf_conf = IGU_PF_CONF_FUNC_EN; @@ -18743,15 +18057,6 @@ bxe_reset_func(struct bxe_softc *sc) SB_DISABLED); } -#if 0 - if (CNIC_LOADED(sc)) { - /* CNIC SB */ - REG_WR8(sc, BAR_CSTRORM_INTMEM + - CSTORM_STATUS_BLOCK_DATA_STATE_OFFSET - (bxe_cnic_fw_sb_id(sc)), SB_DISABLED); - } -#endif - /* SP SB */ REG_WR8(sc, BAR_CSTRORM_INTMEM + CSTORM_SP_STATUS_BLOCK_DATA_STATE_OFFSET(func), diff --git a/sys/dev/bxe/bxe.h b/sys/dev/bxe/bxe.h index ea4006441a91..c3240c9a1edd 100644 --- a/sys/dev/bxe/bxe.h +++ b/sys/dev/bxe/bxe.h @@ -320,13 +320,6 @@ struct bxe_device_type #define RX_BD_USABLE (RX_BD_USABLE_PER_PAGE * RX_BD_NUM_PAGES) #define RX_BD_MAX (RX_BD_TOTAL - 1) -#if 0 -#define NUM_RX_RINGS RX_BD_NUM_PAGES -#define NUM_RX_BD RX_BD_TOTAL -#define MAX_RX_BD RX_BD_MAX -#define MAX_RX_AVAIL RX_BD_USABLE -#endif - #define RX_BD_NEXT(x) \ ((((x) & RX_BD_PER_PAGE_MASK) == (RX_BD_USABLE_PER_PAGE - 1)) ? 
\ ((x) + 3) : ((x) + 1)) @@ -386,13 +379,6 @@ struct bxe_device_type #define RCQ_PAGE(x) (((x) & ~RCQ_USABLE_PER_PAGE) >> 7) #define RCQ_IDX(x) ((x) & RCQ_USABLE_PER_PAGE) -#if 0 -#define NUM_RCQ_RINGS RCQ_NUM_PAGES -#define NUM_RCQ_BD RCQ_TOTAL -#define MAX_RCQ_BD RCQ_MAX -#define MAX_RCQ_AVAIL RCQ_USABLE -#endif - /* * dropless fc calculations for RCQs * Number of RCQs should be as number of buffers in BRB: @@ -627,14 +613,6 @@ struct bxe_fastpath { struct bxe_sw_tpa_info rx_tpa_info[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; bus_dmamap_t rx_tpa_info_mbuf_spare_map; uint64_t rx_tpa_queue_used; -#if 0 - bus_dmamap_t rx_tpa_mbuf_map[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; - bus_dmamap_t rx_tpa_mbuf_spare_map; - struct mbuf *rx_tpa_mbuf_ptr[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; - bus_dma_segment_t rx_tpa_mbuf_segs[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; - - uint8_t tpa_state[ETH_MAX_AGGREGATION_QUEUES_E1H_E2]; -#endif uint16_t *sb_index_values; uint16_t *sb_running_index; @@ -688,16 +666,6 @@ struct bxe_fastpath { uint16_t tx_bd_prod; uint16_t tx_bd_cons; -#if 0 - /* status block number in hardware */ - uint8_t sb_id; -#define FP_SB_ID(fp) (fp->sb_id) - - /* driver copy of the fastpath CSTORM/USTORM indices */ - uint16_t fp_c_idx; - uint16_t fp_u_idx; -#endif - uint64_t sge_mask[RX_SGE_MASK_LEN]; uint16_t rx_sge_prod; @@ -964,19 +932,6 @@ struct bxe_fw_stats_data { */ struct bxe_slowpath { -#if 0 - /* - * The cdu_context array MUST be the first element in this - * structure. It is used during the leading edge ramrod - * operation. - */ - union cdu_context context[MAX_CONTEXT]; - - /* Used as a DMA source for MAC configuration. 
*/ - struct mac_configuration_cmd mac_config; - struct mac_configuration_cmd mcast_config; -#endif - /* used by the DMAE command executer */ struct dmae_command dmae[MAX_DMAE_C]; @@ -1754,10 +1709,6 @@ struct bxe_softc { uint8_t dropless_fc; -#if 0 - struct bxe_dma *t2; -#endif - /* total number of FW statistics requests */ uint8_t fw_stats_num; /* @@ -1953,13 +1904,6 @@ void bxe_reg_write32(struct bxe_softc *sc, bus_size_t offset, uint32_t val); #define BXE_FP(sc, nr, var) ((sc)->fp[(nr)].var) #define BXE_SP_OBJ(sc, fp) ((sc)->sp_objs[(fp)->index]) -#if 0 -#define bxe_fp(sc, nr, var) ((sc)->fp[nr].var) -#define bxe_sp_obj(sc, fp) ((sc)->sp_objs[(fp)->index]) -#define bxe_fp_stats(sc, fp) (&(sc)->fp_stats[(fp)->index]) -#define bxe_fp_qstats(sc, fp) (&(sc)->fp_stats[(fp)->index].eth_q_stats) -#endif - #define REG_RD_DMAE(sc, offset, valp, len32) \ do { \ bxe_read_dmae(sc, offset, len32); \ @@ -2487,12 +2431,6 @@ bxe_stats_id(struct bxe_fastpath *fp) struct bxe_softc *sc = fp->sc; if (!CHIP_IS_E1x(sc)) { -#if 0 - /* there are special statistics counters for FCoE 136..140 */ - if (IS_FCOE_FP(fp)) { - return (sc->cnic_base_cl_id + (sc->pf_num >> 1)); - } -#endif return (fp->cl_id); } diff --git a/sys/dev/bxe/bxe_stats.c b/sys/dev/bxe/bxe_stats.c index 6a8551c6500d..832565913023 100644 --- a/sys/dev/bxe/bxe_stats.c +++ b/sys/dev/bxe/bxe_stats.c @@ -1558,23 +1558,6 @@ bxe_prep_fw_stats_req(struct bxe_softc *sc) cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); -#if 0 - /**** FCoE FW statistics data ****/ - if (!NO_FCOE(sc)) { - cur_data_offset = (sc->fw_stats_data_mapping + - offsetof(struct bxe_fw_stats_data, fcoe)); - - cur_query_entry = &sc->fw_stats_req->query[BXE_FCOE_QUERY_IDX]; - - cur_query_entry->kind = STATS_TYPE_FCOE; - /* For FCoE query index is a DONT CARE */ - cur_query_entry->index = SC_PORT(sc); - cur_query_entry->funcID = cpu_to_le16(SC_FUNC(sc)); - cur_query_entry->address.hi 
= htole32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); - } -#endif - /**** Clients' queries ****/ cur_data_offset = (sc->fw_stats_data_mapping + offsetof(struct bxe_fw_stats_data, queue_stats)); @@ -1583,12 +1566,7 @@ bxe_prep_fw_stats_req(struct bxe_softc *sc) * First queue query index depends whether FCoE offloaded request will * be included in the ramrod */ -#if 0 - if (!NO_FCOE(sc)) - first_queue_query_index = BXE_FIRST_QUEUE_QUERY_IDX; - else -#endif - first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1); + first_queue_query_index = (BXE_FIRST_QUEUE_QUERY_IDX - 1); for (i = 0; i < sc->num_queues; i++) { cur_query_entry = @@ -1602,20 +1580,6 @@ bxe_prep_fw_stats_req(struct bxe_softc *sc) cur_data_offset += sizeof(struct per_queue_stats); } - -#if 0 - /* add FCoE queue query if needed */ - if (!NO_FCOE(sc)) { - cur_query_entry = - &sc->fw_stats_req->query[first_queue_query_index + i]; - - cur_query_entry->kind = STATS_TYPE_QUEUE; - cur_query_entry->index = bxe_stats_id(&sc->fp[FCOE_IDX(sc)]); - cur_query_entry->funcID = htole16(SC_FUNC(sc)); - cur_query_entry->address.hi = htole32(U64_HI(cur_data_offset)); - cur_query_entry->address.lo = htole32(U64_LO(cur_data_offset)); - } -#endif } void @@ -1757,22 +1721,6 @@ bxe_afex_collect_stats(struct bxe_softc *sc, int i; struct afex_stats *afex_stats = (struct afex_stats *)void_afex_stats; struct bxe_eth_stats *estats = &sc->eth_stats; -#if 0 - struct per_queue_stats *fcoe_q_stats = - &sc->fw_stats_data->queue_stats[FCOE_IDX(sc)]; - - struct tstorm_per_queue_stats *fcoe_q_tstorm_stats = - &fcoe_q_stats->tstorm_queue_statistics; - - struct ustorm_per_queue_stats *fcoe_q_ustorm_stats = - &fcoe_q_stats->ustorm_queue_statistics; - - struct xstorm_per_queue_stats *fcoe_q_xstorm_stats = - &fcoe_q_stats->xstorm_queue_statistics; - - struct fcoe_statistics_params *fw_fcoe_stat = - &sc->fw_stats_data->fcoe; -#endif memset(afex_stats, 0, sizeof(struct afex_stats)); @@ -1869,144 
+1817,6 @@ bxe_afex_collect_stats(struct bxe_softc *sc, qstats->total_transmitted_dropped_packets_error_lo); } -#if 0 - /* - * Now add FCoE statistics which are collected separately - * (both offloaded and non offloaded) - */ - if (!NO_FCOE(sc)) { - ADD_64_LE(afex_stats->rx_unicast_bytes_hi, - LE32_0, - afex_stats->rx_unicast_bytes_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_byte_cnt); - - ADD_64_LE(afex_stats->rx_unicast_bytes_hi, - fcoe_q_tstorm_stats->rcv_ucast_bytes.hi, - afex_stats->rx_unicast_bytes_lo, - fcoe_q_tstorm_stats->rcv_ucast_bytes.lo); - - ADD_64_LE(afex_stats->rx_broadcast_bytes_hi, - fcoe_q_tstorm_stats->rcv_bcast_bytes.hi, - afex_stats->rx_broadcast_bytes_lo, - fcoe_q_tstorm_stats->rcv_bcast_bytes.lo); - - ADD_64_LE(afex_stats->rx_multicast_bytes_hi, - fcoe_q_tstorm_stats->rcv_mcast_bytes.hi, - afex_stats->rx_multicast_bytes_lo, - fcoe_q_tstorm_stats->rcv_mcast_bytes.lo); - - ADD_64_LE(afex_stats->rx_unicast_frames_hi, - LE32_0, - afex_stats->rx_unicast_frames_lo, - fw_fcoe_stat->rx_stat0.fcoe_rx_pkt_cnt); - - ADD_64_LE(afex_stats->rx_unicast_frames_hi, - LE32_0, - afex_stats->rx_unicast_frames_lo, - fcoe_q_tstorm_stats->rcv_ucast_pkts); - - ADD_64_LE(afex_stats->rx_broadcast_frames_hi, - LE32_0, - afex_stats->rx_broadcast_frames_lo, - fcoe_q_tstorm_stats->rcv_bcast_pkts); - - ADD_64_LE(afex_stats->rx_multicast_frames_hi, - LE32_0, - afex_stats->rx_multicast_frames_lo, - fcoe_q_tstorm_stats->rcv_ucast_pkts); - - ADD_64_LE(afex_stats->rx_frames_discarded_hi, - LE32_0, - afex_stats->rx_frames_discarded_lo, - fcoe_q_tstorm_stats->checksum_discard); - - ADD_64_LE(afex_stats->rx_frames_discarded_hi, - LE32_0, - afex_stats->rx_frames_discarded_lo, - fcoe_q_tstorm_stats->pkts_too_big_discard); - - ADD_64_LE(afex_stats->rx_frames_discarded_hi, - LE32_0, - afex_stats->rx_frames_discarded_lo, - fcoe_q_tstorm_stats->ttl0_discard); - - ADD_64_LE16(afex_stats->rx_frames_dropped_hi, - LE16_0, - afex_stats->rx_frames_dropped_lo, - 
fcoe_q_tstorm_stats->no_buff_discard); - - ADD_64_LE(afex_stats->rx_frames_dropped_hi, - LE32_0, - afex_stats->rx_frames_dropped_lo, - fcoe_q_ustorm_stats->ucast_no_buff_pkts); - - ADD_64_LE(afex_stats->rx_frames_dropped_hi, - LE32_0, - afex_stats->rx_frames_dropped_lo, - fcoe_q_ustorm_stats->mcast_no_buff_pkts); - - ADD_64_LE(afex_stats->rx_frames_dropped_hi, - LE32_0, - afex_stats->rx_frames_dropped_lo, - fcoe_q_ustorm_stats->bcast_no_buff_pkts); - - ADD_64_LE(afex_stats->rx_frames_dropped_hi, - LE32_0, - afex_stats->rx_frames_dropped_lo, - fw_fcoe_stat->rx_stat1.fcoe_rx_drop_pkt_cnt); - - ADD_64_LE(afex_stats->rx_frames_dropped_hi, - LE32_0, - afex_stats->rx_frames_dropped_lo, - fw_fcoe_stat->rx_stat2.fcoe_rx_drop_pkt_cnt); - - ADD_64_LE(afex_stats->tx_unicast_bytes_hi, - LE32_0, - afex_stats->tx_unicast_bytes_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_byte_cnt); - - ADD_64_LE(afex_stats->tx_unicast_bytes_hi, - fcoe_q_xstorm_stats->ucast_bytes_sent.hi, - afex_stats->tx_unicast_bytes_lo, - fcoe_q_xstorm_stats->ucast_bytes_sent.lo); - - ADD_64_LE(afex_stats->tx_broadcast_bytes_hi, - fcoe_q_xstorm_stats->bcast_bytes_sent.hi, - afex_stats->tx_broadcast_bytes_lo, - fcoe_q_xstorm_stats->bcast_bytes_sent.lo); - - ADD_64_LE(afex_stats->tx_multicast_bytes_hi, - fcoe_q_xstorm_stats->mcast_bytes_sent.hi, - afex_stats->tx_multicast_bytes_lo, - fcoe_q_xstorm_stats->mcast_bytes_sent.lo); - - ADD_64_LE(afex_stats->tx_unicast_frames_hi, - LE32_0, - afex_stats->tx_unicast_frames_lo, - fw_fcoe_stat->tx_stat.fcoe_tx_pkt_cnt); - - ADD_64_LE(afex_stats->tx_unicast_frames_hi, - LE32_0, - afex_stats->tx_unicast_frames_lo, - fcoe_q_xstorm_stats->ucast_pkts_sent); - - ADD_64_LE(afex_stats->tx_broadcast_frames_hi, - LE32_0, - afex_stats->tx_broadcast_frames_lo, - fcoe_q_xstorm_stats->bcast_pkts_sent); - - ADD_64_LE(afex_stats->tx_multicast_frames_hi, - LE32_0, - afex_stats->tx_multicast_frames_lo, - fcoe_q_xstorm_stats->mcast_pkts_sent); - - ADD_64_LE(afex_stats->tx_frames_dropped_hi, - LE32_0, 
- afex_stats->tx_frames_dropped_lo, - fcoe_q_xstorm_stats->error_drop_pkts); - } -#endif - /* * If port stats are requested, add them to the PMF * stats, as anyway they will be accumulated by the diff --git a/sys/dev/bxe/ecore_init.h b/sys/dev/bxe/ecore_init.h index 31417cc4cd90..7e1af17aea66 100644 --- a/sys/dev/bxe/ecore_init.h +++ b/sys/dev/bxe/ecore_init.h @@ -749,17 +749,10 @@ static inline void ecore_set_mcp_parity(struct bxe_softc *sc, uint8_t enable) for (i = 0; i < ARRSIZE(mcp_attn_ctl_regs); i++) { reg_val = REG_RD(sc, mcp_attn_ctl_regs[i].addr); -#if 0 - if (enable) - reg_val |= MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */ - else - reg_val &= ~MISC_AEU_ENABLE_MCP_PRTY_BITS; /* Linux is using mcp_attn_ctl_regs[i].bits */ -#else if (enable) reg_val |= mcp_attn_ctl_regs[i].bits; else reg_val &= ~mcp_attn_ctl_regs[i].bits; -#endif REG_WR(sc, mcp_attn_ctl_regs[i].addr, reg_val); } -- cgit v1.2.3 From 2bc4747cc49351d47d2bdacd5c5d82bf4b44474b Mon Sep 17 00:00:00 2001 From: Eric Joyner Date: Fri, 19 Feb 2016 22:48:20 +0000 Subject: ixl(4)/ixlv(4): Revert m_collapse() in ixl_xmit() to m_defrag(). The m_collapse() call would fail when transmitting medium-sized packets when the interface mtu was set to 9000, so revert back to m_defrag(), which does not fail. 
Differential Revision: https://reviews.freebsd.org/D5207 Tested by: jeffrey.e.pieper@intel.com Sponsored by: Intel Corporation --- sys/dev/ixl/ixl_txrx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/ixl/ixl_txrx.c b/sys/dev/ixl/ixl_txrx.c index 43a19490c074..e78af119bfcb 100644 --- a/sys/dev/ixl/ixl_txrx.c +++ b/sys/dev/ixl/ixl_txrx.c @@ -286,7 +286,7 @@ ixl_xmit(struct ixl_queue *que, struct mbuf **m_headp) if (error == EFBIG) { struct mbuf *m; - m = m_collapse(*m_headp, M_NOWAIT, maxsegs); + m = m_defrag(*m_headp, M_NOWAIT); if (m == NULL) { que->mbuf_defrag_failed++; m_freem(*m_headp); -- cgit v1.2.3 From 7915adb56014e31e8ed8a4418060625c2e223fc0 Mon Sep 17 00:00:00 2001 From: Justin Hibbits Date: Sat, 20 Feb 2016 01:32:58 +0000 Subject: Introduce a RMAN_IS_DEFAULT_RANGE() macro, and use it. This simplifies checking for default resource range for bus_alloc_resource(), and improves readability. This is part of, and related to, the migration of rman_res_t from u_long to uintmax_t. 
Discussed with: jhb Suggested by: marcel --- sys/arm/at91/at91.c | 2 +- sys/arm/at91/at91_pinctrl.c | 2 +- sys/arm/cavium/cns11xx/econa.c | 2 +- sys/arm/mv/mv_localbus.c | 2 +- sys/arm/mv/mv_pci.c | 2 +- sys/arm64/arm64/gic_fdt.c | 2 +- sys/arm64/arm64/gic_v3_fdt.c | 3 ++- sys/arm64/arm64/nexus.c | 2 +- sys/arm64/cavium/thunder_pcie.c | 2 +- sys/arm64/cavium/thunder_pcie_fdt.c | 2 +- sys/arm64/cavium/thunder_pcie_pem.c | 2 +- sys/dev/acpica/acpi.c | 2 +- sys/dev/eisa/eisaconf.c | 2 +- sys/dev/fdt/simplebus.c | 2 +- sys/dev/gpio/gpiobus.c | 2 +- sys/dev/mca/mca_bus.c | 2 +- sys/dev/ofw/ofwbus.c | 2 +- sys/dev/pccard/pccard.c | 2 +- sys/dev/siba/siba.c | 2 +- sys/dev/vnic/mrml_bridge.c | 2 +- sys/kern/subr_bus.c | 2 +- sys/mips/adm5120/obio.c | 2 +- sys/mips/alchemy/obio.c | 2 +- sys/mips/atheros/apb.c | 2 +- sys/mips/beri/beri_simplebus.c | 2 +- sys/mips/idt/obio.c | 2 +- sys/mips/mips/nexus.c | 2 +- sys/mips/nlm/xlp_simplebus.c | 2 +- sys/mips/rt305x/obio.c | 2 +- sys/mips/sibyte/sb_zbbus.c | 2 +- sys/powerpc/mpc85xx/isa.c | 2 +- sys/riscv/riscv/nexus.c | 2 +- sys/sparc64/central/central.c | 2 +- sys/sparc64/ebus/ebus.c | 2 +- sys/sparc64/fhc/fhc.c | 2 +- sys/sparc64/isa/isa.c | 2 +- sys/sparc64/pci/apb.c | 2 +- sys/sparc64/sbus/sbus.c | 2 +- sys/sparc64/sparc64/nexus.c | 2 +- sys/sparc64/sparc64/upa.c | 2 +- sys/sys/rman.h | 2 ++ sys/x86/isa/isa.c | 2 +- sys/x86/x86/nexus.c | 2 +- 43 files changed, 45 insertions(+), 42 deletions(-) diff --git a/sys/arm/at91/at91.c b/sys/arm/at91/at91.c index 0b0d9118c9f0..0947ec73c50e 100644 --- a/sys/arm/at91/at91.c +++ b/sys/arm/at91/at91.c @@ -164,7 +164,7 @@ at91_alloc_resource(device_t dev, device_t child, int type, int *rid, return (NULL); if (rle->res) panic("Resource rid %d type %d already in use", *rid, type); - if (start == 0UL && end == ~0UL) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); diff --git 
a/sys/arm/at91/at91_pinctrl.c b/sys/arm/at91/at91_pinctrl.c index 7a90f0f80d15..e5652d1792c9 100644 --- a/sys/arm/at91/at91_pinctrl.c +++ b/sys/arm/at91/at91_pinctrl.c @@ -280,7 +280,7 @@ pinctrl_alloc_resource(device_t bus, device_t child, int type, int *rid, * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); diff --git a/sys/arm/cavium/cns11xx/econa.c b/sys/arm/cavium/cns11xx/econa.c index 29f09044d2ce..d3dbf062ffbb 100644 --- a/sys/arm/cavium/cns11xx/econa.c +++ b/sys/arm/cavium/cns11xx/econa.c @@ -425,7 +425,7 @@ econa_alloc_resource(device_t dev, device_t child, int type, int *rid, } if (rle->res) panic("Resource rid %d type %d already in use", *rid, type); - if (start == 0UL && end == ~0UL) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = rle->start; count = ulmax(count, rle->count); end = ulmax(rle->end, start + count - 1); diff --git a/sys/arm/mv/mv_localbus.c b/sys/arm/mv/mv_localbus.c index f7a80fe0e721..936bc18fe87b 100644 --- a/sys/arm/mv/mv_localbus.c +++ b/sys/arm/mv/mv_localbus.c @@ -341,7 +341,7 @@ localbus_alloc_resource(device_t bus, device_t child, int type, int *rid, * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
*/ - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); diff --git a/sys/arm/mv/mv_pci.c b/sys/arm/mv/mv_pci.c index 49d095d1820e..dc8b890e305d 100644 --- a/sys/arm/mv/mv_pci.c +++ b/sys/arm/mv/mv_pci.c @@ -844,7 +844,7 @@ mv_pcib_alloc_resource(device_t dev, device_t child, int type, int *rid, type, rid, start, end, count, flags)); }; - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { start = sc->sc_mem_base; end = sc->sc_mem_base + sc->sc_mem_size - 1; count = sc->sc_mem_size; diff --git a/sys/arm64/arm64/gic_fdt.c b/sys/arm64/arm64/gic_fdt.c index 075f1d6b8523..34d8009ad223 100644 --- a/sys/arm64/arm64/gic_fdt.c +++ b/sys/arm64/arm64/gic_fdt.c @@ -211,7 +211,7 @@ arm_gic_fdt_alloc_resource(device_t bus, device_t child, int type, int *rid, * Request for the default allocation with a given rid: use resource * list stored in the local device info. */ - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); diff --git a/sys/arm64/arm64/gic_v3_fdt.c b/sys/arm64/arm64/gic_v3_fdt.c index 6c8de4929caf..e5d75c3c0db9 100644 --- a/sys/arm64/arm64/gic_v3_fdt.c +++ b/sys/arm64/arm64/gic_v3_fdt.c @@ -35,6 +35,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include @@ -180,7 +181,7 @@ gic_v3_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid, struct resource_list_entry *rle; int ranges_len; - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type != SYS_RES_MEMORY) diff --git a/sys/arm64/arm64/nexus.c b/sys/arm64/arm64/nexus.c index c56c7aa83439..94b904873409 100644 --- a/sys/arm64/arm64/nexus.c +++ b/sys/arm64/arm64/nexus.c @@ -223,7 +223,7 @@ nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, * (ie. 
they aren't maintained by a child bus), then work out * the start/end values. */ - if ((start == 0UL) && (end == ~0UL) && (count == 1)) { + if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); diff --git a/sys/arm64/cavium/thunder_pcie.c b/sys/arm64/cavium/thunder_pcie.c index ca2fd6f7ea68..b4ac43c88ef4 100644 --- a/sys/arm64/cavium/thunder_pcie.c +++ b/sys/arm64/cavium/thunder_pcie.c @@ -292,7 +292,7 @@ thunder_pcie_alloc_resource(device_t dev, device_t child, int type, int *rid, type, rid, start, end, count, flags)); }; - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { /* Read BAR manually to get resource address and size */ pci_read_bar(child, *rid, &map, &testval, NULL); diff --git a/sys/arm64/cavium/thunder_pcie_fdt.c b/sys/arm64/cavium/thunder_pcie_fdt.c index f1624f615484..09b56602d74a 100644 --- a/sys/arm64/cavium/thunder_pcie_fdt.c +++ b/sys/arm64/cavium/thunder_pcie_fdt.c @@ -283,7 +283,7 @@ thunder_pcie_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid, sc = device_get_softc(bus); - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) diff --git a/sys/arm64/cavium/thunder_pcie_pem.c b/sys/arm64/cavium/thunder_pcie_pem.c index a7138dc70870..0f1e6a31b7fb 100644 --- a/sys/arm64/cavium/thunder_pcie_pem.c +++ b/sys/arm64/cavium/thunder_pcie_pem.c @@ -435,7 +435,7 @@ thunder_pem_alloc_resource(device_t dev, device_t child, int type, int *rid, end, count, flags)); }; - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { device_printf(dev, "Cannot allocate resource with unspecified range\n"); goto fail; diff --git a/sys/dev/acpica/acpi.c b/sys/dev/acpica/acpi.c index 4565b5b55e6b..91472d1d8f1a 100644 --- a/sys/dev/acpica/acpi.c +++ 
b/sys/dev/acpica/acpi.c @@ -1334,7 +1334,7 @@ acpi_alloc_resource(device_t bus, device_t child, int type, int *rid, struct resource_list_entry *rle; struct resource_list *rl; struct resource *res; - int isdefault = (start == 0UL && end == ~0UL); + int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); /* * First attempt at allocating the resource. For direct children, diff --git a/sys/dev/eisa/eisaconf.c b/sys/dev/eisa/eisaconf.c index ada184676401..28a9bc622de9 100644 --- a/sys/dev/eisa/eisaconf.c +++ b/sys/dev/eisa/eisaconf.c @@ -359,7 +359,7 @@ eisa_alloc_resource(device_t dev, device_t child, int type, int *rid, struct resource *rv, **rvp = 0; isdefault = (device_get_parent(child) == dev && - start == 0UL && end == ~0UL && count == 1); + RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); switch (type) { case SYS_RES_IRQ: diff --git a/sys/dev/fdt/simplebus.c b/sys/dev/fdt/simplebus.c index 42096d28fe0d..36c278f388cf 100644 --- a/sys/dev/fdt/simplebus.c +++ b/sys/dev/fdt/simplebus.c @@ -335,7 +335,7 @@ simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
*/ - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); diff --git a/sys/dev/gpio/gpiobus.c b/sys/dev/gpio/gpiobus.c index e3755de29599..44ab581497ec 100644 --- a/sys/dev/gpio/gpiobus.c +++ b/sys/dev/gpio/gpiobus.c @@ -516,7 +516,7 @@ gpiobus_alloc_resource(device_t bus, device_t child, int type, int *rid, if (type != SYS_RES_IRQ) return (NULL); - isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); rle = NULL; if (isdefault) { rl = BUS_GET_RESOURCE_LIST(bus, child); diff --git a/sys/dev/mca/mca_bus.c b/sys/dev/mca/mca_bus.c index f97fd24ac2dc..55ac9ed77fd9 100644 --- a/sys/dev/mca/mca_bus.c +++ b/sys/dev/mca/mca_bus.c @@ -463,7 +463,7 @@ mca_alloc_resource (device_t dev, device_t child, int type, int *rid, int isdefault; int passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != dev); if (!passthrough && !isdefault) { diff --git a/sys/dev/ofw/ofwbus.c b/sys/dev/ofw/ofwbus.c index 23e604a47dad..8eb5dd51833c 100644 --- a/sys/dev/ofw/ofwbus.c +++ b/sys/dev/ofw/ofwbus.c @@ -186,7 +186,7 @@ ofwbus_alloc_resource(device_t bus, device_t child, int type, int *rid, struct resource_list_entry *rle; int isdefault, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); sc = device_get_softc(bus); rle = NULL; diff --git a/sys/dev/pccard/pccard.c b/sys/dev/pccard/pccard.c index b3c2e32e61eb..746b50947d92 100644 --- a/sys/dev/pccard/pccard.c +++ b/sys/dev/pccard/pccard.c @@ -1137,7 +1137,7 @@ pccard_alloc_resource(device_t dev, device_t child, int type, int *rid, struct pccard_ivar *dinfo; struct resource_list_entry *rle = 0; int passthrough = (device_get_parent(child) != dev); - int isdefault = (start == 0 && end == ~0UL && count 
== 1); + int isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); struct resource *r = NULL; /* XXX I'm no longer sure this is right */ diff --git a/sys/dev/siba/siba.c b/sys/dev/siba/siba.c index 8c3898c7b8bd..3489ddc807fd 100644 --- a/sys/dev/siba/siba.c +++ b/sys/dev/siba/siba.c @@ -383,7 +383,7 @@ siba_alloc_resource(device_t bus, device_t child, int type, int *rid, printf("%s: entry\n", __func__); #endif - isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; rl = BUS_GET_RESOURCE_LIST(bus, child); rle = NULL; diff --git a/sys/dev/vnic/mrml_bridge.c b/sys/dev/vnic/mrml_bridge.c index edc15717c4ae..cab8da50ce03 100644 --- a/sys/dev/vnic/mrml_bridge.c +++ b/sys/dev/vnic/mrml_bridge.c @@ -139,7 +139,7 @@ mrmlb_ofw_bus_alloc_res(device_t bus, device_t child, int type, int *rid, struct resource_list_entry *rle; int i; - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); if (type == SYS_RES_IOPORT) diff --git a/sys/kern/subr_bus.c b/sys/kern/subr_bus.c index 22cddea6ed74..a144031867aa 100644 --- a/sys/kern/subr_bus.c +++ b/sys/kern/subr_bus.c @@ -3311,7 +3311,7 @@ resource_list_alloc(struct resource_list *rl, device_t bus, device_t child, { struct resource_list_entry *rle = NULL; int passthrough = (device_get_parent(child) != bus); - int isdefault = (start == 0UL && end == ~0UL); + int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (passthrough) { return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, diff --git a/sys/mips/adm5120/obio.c b/sys/mips/adm5120/obio.c index 46c09b7ab8ee..8b62629c977a 100644 --- a/sys/mips/adm5120/obio.c +++ b/sys/mips/adm5120/obio.c @@ -231,7 +231,7 @@ obio_alloc_resource(device_t bus, device_t child, int type, int *rid, struct rman *rm; int isdefault, needactivate, passthrough; - isdefault = (start == 0UL && end == ~0UL && count == 
1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; diff --git a/sys/mips/alchemy/obio.c b/sys/mips/alchemy/obio.c index 3c0fecbdac16..96b1f1d75b39 100644 --- a/sys/mips/alchemy/obio.c +++ b/sys/mips/alchemy/obio.c @@ -232,7 +232,7 @@ obio_alloc_resource(device_t bus, device_t child, int type, int *rid, struct rman *rm; int isdefault, needactivate, passthrough; - isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; diff --git a/sys/mips/atheros/apb.c b/sys/mips/atheros/apb.c index c072567854c8..8d230a7f1bf6 100644 --- a/sys/mips/atheros/apb.c +++ b/sys/mips/atheros/apb.c @@ -170,7 +170,7 @@ apb_alloc_resource(device_t bus, device_t child, int type, int *rid, struct rman *rm; int isdefault, needactivate, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end)); needactivate = flags & RF_ACTIVE; /* * Pass memory requests to nexus device diff --git a/sys/mips/beri/beri_simplebus.c b/sys/mips/beri/beri_simplebus.c index 31771a5617f2..dd219a5e1282 100644 --- a/sys/mips/beri/beri_simplebus.c +++ b/sys/mips/beri/beri_simplebus.c @@ -260,7 +260,7 @@ simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, * Request for the default allocation with a given rid: use resource * list stored in the local device info. 
*/ - if ((start == 0UL) && (end == ~0UL)) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { if ((di = device_get_ivars(child)) == NULL) return (NULL); diff --git a/sys/mips/idt/obio.c b/sys/mips/idt/obio.c index c8b4fef677ff..b95b11ac66cf 100644 --- a/sys/mips/idt/obio.c +++ b/sys/mips/idt/obio.c @@ -165,7 +165,7 @@ obio_alloc_resource(device_t bus, device_t child, int type, int *rid, struct rman *rm; int isdefault, needactivate, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end)); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; diff --git a/sys/mips/mips/nexus.c b/sys/mips/mips/nexus.c index 1e3d4093cb6f..ba7db318df88 100644 --- a/sys/mips/mips/nexus.c +++ b/sys/mips/mips/nexus.c @@ -281,7 +281,7 @@ nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, (void *)(intptr_t)end, count, flags); dprintf("%s: requested rid is %d\n", __func__, *rid); - isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; diff --git a/sys/mips/nlm/xlp_simplebus.c b/sys/mips/nlm/xlp_simplebus.c index 7d2f697d83a0..790955bb70f6 100644 --- a/sys/mips/nlm/xlp_simplebus.c +++ b/sys/mips/nlm/xlp_simplebus.c @@ -192,7 +192,7 @@ xlp_simplebus_alloc_resource(device_t bus, device_t child, int type, int *rid, bustag = NULL; if (!passthrough) { - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); if (isdefault) { rle = resource_list_find(&di->rl, type, *rid); if (rle == NULL) diff --git a/sys/mips/rt305x/obio.c b/sys/mips/rt305x/obio.c index 34cec413fd25..ff7ba037dbe9 100644 --- a/sys/mips/rt305x/obio.c +++ b/sys/mips/rt305x/obio.c @@ -287,7 +287,7 @@ obio_alloc_resource(device_t bus, device_t child, int type, int *rid, struct rman *rm; int isdefault, needactivate, passthrough; - 
isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; diff --git a/sys/mips/sibyte/sb_zbbus.c b/sys/mips/sibyte/sb_zbbus.c index 28584217de03..f87b6b8bceef 100644 --- a/sys/mips/sibyte/sb_zbbus.c +++ b/sys/mips/sibyte/sb_zbbus.c @@ -288,7 +288,7 @@ zbbus_alloc_resource(device_t bus, device_t child, int type, int *rid, struct resource_list_entry *rle; struct zbbus_devinfo *dinfo; - isdefault = (start == 0UL && end == ~0UL && count == 1); + isdefault = (RMAN_IS_DEFAULT_RANGE(start, end) && count == 1); /* * Our direct child is asking for a default resource allocation. diff --git a/sys/powerpc/mpc85xx/isa.c b/sys/powerpc/mpc85xx/isa.c index 1020e775117e..e25a75cc4d16 100644 --- a/sys/powerpc/mpc85xx/isa.c +++ b/sys/powerpc/mpc85xx/isa.c @@ -52,7 +52,7 @@ isa_alloc_resource(device_t bus, device_t child, int type, int *rid, struct resource_list *rl = &idev->id_resources; int isdefault, passthrough, rids; - isdefault = (start == 0UL && end == ~0UL) ? 1 : 0; + isdefault = RMAN_IS_DEFAULT_RANGE(start, end) ? 1 : 0; passthrough = (device_get_parent(child) != bus) ? 1 : 0; if (!passthrough && !isdefault && diff --git a/sys/riscv/riscv/nexus.c b/sys/riscv/riscv/nexus.c index 83b57959edab..8d862a1481dc 100644 --- a/sys/riscv/riscv/nexus.c +++ b/sys/riscv/riscv/nexus.c @@ -215,7 +215,7 @@ nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, * (ie. they aren't maintained by a child bus), then work out * the start/end values. 
*/ - if ((start == 0UL) && (end == ~0UL) && (count == 1)) { + if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); diff --git a/sys/sparc64/central/central.c b/sys/sparc64/central/central.c index 62fa79d364f1..15876f09d326 100644 --- a/sys/sparc64/central/central.c +++ b/sys/sparc64/central/central.c @@ -228,7 +228,7 @@ central_alloc_resource(device_t bus, device_t child, int type, int *rid, int passthrough; int i; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); res = NULL; rle = NULL; diff --git a/sys/sparc64/ebus/ebus.c b/sys/sparc64/ebus/ebus.c index 961dd07f9b37..a53b20b85b24 100644 --- a/sys/sparc64/ebus/ebus.c +++ b/sys/sparc64/ebus/ebus.c @@ -438,7 +438,7 @@ ebus_alloc_resource(device_t bus, device_t child, int type, int *rid, uint64_t cend, cstart, offset; int i, isdefault, passthrough, ridx; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); sc = device_get_softc(bus); rl = BUS_GET_RESOURCE_LIST(bus, child); diff --git a/sys/sparc64/fhc/fhc.c b/sys/sparc64/fhc/fhc.c index 3de81ff2b162..b9d8bc668721 100644 --- a/sys/sparc64/fhc/fhc.c +++ b/sys/sparc64/fhc/fhc.c @@ -433,7 +433,7 @@ fhc_alloc_resource(device_t bus, device_t child, int type, int *rid, int passthrough; int i; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); res = NULL; rle = NULL; diff --git a/sys/sparc64/isa/isa.c b/sys/sparc64/isa/isa.c index ad4bcdb1d1d7..74627c5d3081 100644 --- a/sys/sparc64/isa/isa.c +++ b/sys/sparc64/isa/isa.c @@ -279,7 +279,7 @@ isa_alloc_resource(device_t bus, device_t child, int type, int *rid, * Consider adding a resource definition. 
*/ int passthrough = (device_get_parent(child) != bus); - int isdefault = (start == 0UL && end == ~0UL); + int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); struct resource_list *rl; struct resource_list_entry *rle; u_long base, limit; diff --git a/sys/sparc64/pci/apb.c b/sys/sparc64/pci/apb.c index c2fe50828e46..ba3643c13c0c 100644 --- a/sys/sparc64/pci/apb.c +++ b/sys/sparc64/pci/apb.c @@ -238,7 +238,7 @@ apb_alloc_resource(device_t dev, device_t child, int type, int *rid, * out where it's coming from (we should actually never see these) so * we just have to punt. */ - if (start == 0 && end == ~0) { + if (RMAN_IS_DEFAULT_RANGE(start, end)) { device_printf(dev, "can't decode default resource id %d for " "%s, bypassing\n", *rid, device_get_nameunit(child)); goto passup; diff --git a/sys/sparc64/sbus/sbus.c b/sys/sparc64/sbus/sbus.c index e4bc3b464e0c..af46c7720a35 100644 --- a/sys/sparc64/sbus/sbus.c +++ b/sys/sparc64/sbus/sbus.c @@ -723,7 +723,7 @@ sbus_alloc_resource(device_t bus, device_t child, int type, int *rid, int i, slot; int isdefault, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); rle = NULL; sc = device_get_softc(bus); diff --git a/sys/sparc64/sparc64/nexus.c b/sys/sparc64/sparc64/nexus.c index 67a954b6e6fe..8115f07433ab 100644 --- a/sys/sparc64/sparc64/nexus.c +++ b/sys/sparc64/sparc64/nexus.c @@ -370,7 +370,7 @@ nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, device_t nexus; int isdefault, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != bus); nexus = bus; while (strcmp(device_get_name(device_get_parent(nexus)), "root") != 0) diff --git a/sys/sparc64/sparc64/upa.c b/sys/sparc64/sparc64/upa.c index f3a73bc2afc3..504d001a37fd 100644 --- a/sys/sparc64/sparc64/upa.c +++ b/sys/sparc64/sparc64/upa.c @@ -412,7 +412,7 @@ 
upa_alloc_resource(device_t dev, device_t child, int type, int *rid, bus_addr_t cend, cstart; int i, isdefault, passthrough; - isdefault = (start == 0UL && end == ~0UL); + isdefault = RMAN_IS_DEFAULT_RANGE(start, end); passthrough = (device_get_parent(child) != dev); sc = device_get_softc(dev); rl = BUS_GET_RESOURCE_LIST(dev, child); diff --git a/sys/sys/rman.h b/sys/sys/rman.h index 9ea7bbabe0fb..76db6307cada 100644 --- a/sys/sys/rman.h +++ b/sys/sys/rman.h @@ -63,6 +63,8 @@ enum rman_type { RMAN_UNINIT = 0, RMAN_GAUGE, RMAN_ARRAY }; #define RM_MAX_END ((rman_res_t)~0) +#define RMAN_IS_DEFAULT_RANGE(s,e) ((s) == 0 && (e) == RM_MAX_END) + /* * Userspace-exported structures. */ diff --git a/sys/x86/isa/isa.c b/sys/x86/isa/isa.c index 3de119f0a68a..e0f8ef1bdaef 100644 --- a/sys/x86/isa/isa.c +++ b/sys/x86/isa/isa.c @@ -94,7 +94,7 @@ isa_alloc_resource(device_t bus, device_t child, int type, int *rid, * Consider adding a resource definition. */ int passthrough = (device_get_parent(child) != bus); - int isdefault = (start == 0UL && end == ~0UL); + int isdefault = RMAN_IS_DEFAULT_RANGE(start, end); struct isa_device* idev = DEVTOISA(child); struct resource_list *rl = &idev->id_resources; struct resource_list_entry *rle; diff --git a/sys/x86/x86/nexus.c b/sys/x86/x86/nexus.c index 6b61b39236df..9f68e506e2ee 100644 --- a/sys/x86/x86/nexus.c +++ b/sys/x86/x86/nexus.c @@ -377,7 +377,7 @@ nexus_alloc_resource(device_t bus, device_t child, int type, int *rid, * (ie. they aren't maintained by a child bus), then work out * the start/end values. */ - if ((start == 0UL) && (end == ~0UL) && (count == 1)) { + if (RMAN_IS_DEFAULT_RANGE(start, end) && (count == 1)) { if (device_get_parent(child) != bus || ndev == NULL) return(NULL); rle = resource_list_find(&ndev->nx_resources, type, *rid); -- cgit v1.2.3 From e903856979f28a9549f5735af37de1efcc21192a Mon Sep 17 00:00:00 2001 From: Justin Hibbits Date: Sat, 20 Feb 2016 01:34:13 +0000 Subject: Fix the definition of RM_MAX_END. 
Even though casting from signed to unsigned is well-defined in C, it's better to first cast to the larger unsigned type, then negate. --- sys/sys/rman.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/sys/rman.h b/sys/sys/rman.h index 76db6307cada..2d58f4a21dd2 100644 --- a/sys/sys/rman.h +++ b/sys/sys/rman.h @@ -61,7 +61,7 @@ enum rman_type { RMAN_UNINIT = 0, RMAN_GAUGE, RMAN_ARRAY }; */ #define RM_TEXTLEN 32 -#define RM_MAX_END ((rman_res_t)~0) +#define RM_MAX_END (~(rman_res_t)0) #define RMAN_IS_DEFAULT_RANGE(s,e) ((s) == 0 && (e) == RM_MAX_END) -- cgit v1.2.3 From c4a4eb59e3159973f1b0bb3efb151d873f3fd79a Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Sat, 20 Feb 2016 07:45:21 +0000 Subject: Rename busdma_machdep.c to busdma_machdep-v4.c, pmap.c to pmap-v4.c and trap.c to trap-v4.c to be plain and consistent with other armv4 specific files. --- sys/arm/arm/busdma_machdep-v4.c | 1609 +++++++++++++ sys/arm/arm/busdma_machdep.c | 1609 ------------- sys/arm/arm/pmap-v4.c | 4874 +++++++++++++++++++++++++++++++++++++++ sys/arm/arm/pmap.c | 4874 --------------------------------------- sys/arm/arm/trap-v4.c | 737 ++++++ sys/arm/arm/trap.c | 737 ------ sys/conf/files.arm | 6 +- 7 files changed, 7223 insertions(+), 7223 deletions(-) create mode 100644 sys/arm/arm/busdma_machdep-v4.c delete mode 100644 sys/arm/arm/busdma_machdep.c create mode 100644 sys/arm/arm/pmap-v4.c delete mode 100644 sys/arm/arm/pmap.c create mode 100644 sys/arm/arm/trap-v4.c delete mode 100644 sys/arm/arm/trap.c diff --git a/sys/arm/arm/busdma_machdep-v4.c b/sys/arm/arm/busdma_machdep-v4.c new file mode 100644 index 000000000000..f7e0e261a097 --- /dev/null +++ b/sys/arm/arm/busdma_machdep-v4.c @@ -0,0 +1,1609 @@ +/*- + * Copyright (c) 2012 Ian Lepore + * Copyright (c) 2004 Olivier Houchard + * Copyright (c) 2002 Peter Grehan + * Copyright (c) 1997, 1998 Justin T. Gibbs. + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions, and the following disclaimer, + * without modification, immediately at the beginning of the file. + * 2. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS + * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + * + * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred + */ + +#include +__FBSDID("$FreeBSD$"); + +/* + * ARM bus dma support routines. + * + * XXX Things to investigate / fix some day... + * - What is the earliest that this API can be called? Could there be any + * fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM? + * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the + * bus_dmamap_load() function. This code has historically (and still does) + * honor it in bus_dmamem_alloc(). If we got rid of that we could lose some + * error checking because some resource management calls would become WAITOK + * and thus "cannot fail." 
+ * - The decisions made by _bus_dma_can_bounce() should be made once, at tag + * creation time, and the result stored in the tag. + * - It should be possible to take some shortcuts when mapping a buffer we know + * came from the uma(9) allocators based on what we know about such buffers + * (aligned, contiguous, etc). + * - The allocation of bounce pages could probably be cleaned up, then we could + * retire arm_remap_nocache(). + */ + +#define _ARM32_BUS_DMA_PRIVATE +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#define MAX_BPAGES 64 +#define MAX_DMA_SEGMENTS 4096 +#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 +#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 + +struct bounce_zone; + +struct bus_dma_tag { + bus_dma_tag_t parent; + bus_size_t alignment; + bus_addr_t boundary; + bus_addr_t lowaddr; + bus_addr_t highaddr; + bus_dma_filter_t *filter; + void *filterarg; + bus_size_t maxsize; + u_int nsegments; + bus_size_t maxsegsz; + int flags; + int ref_count; + int map_count; + bus_dma_lock_t *lockfunc; + void *lockfuncarg; + struct bounce_zone *bounce_zone; + /* + * DMA range for this tag. If the page doesn't fall within + * one of these ranges, an error is returned. The caller + * may then decide what to do with the transfer. If the + * range pointer is NULL, it is ignored. 
+ */ + struct arm32_dma_range *ranges; + int _nranges; +}; + +struct bounce_page { + vm_offset_t vaddr; /* kva of bounce buffer */ + bus_addr_t busaddr; /* Physical address */ + vm_offset_t datavaddr; /* kva of client data */ + vm_page_t datapage; /* physical page of client data */ + vm_offset_t dataoffs; /* page offset of client data */ + bus_size_t datacount; /* client data count */ + STAILQ_ENTRY(bounce_page) links; +}; + +struct sync_list { + vm_offset_t vaddr; /* kva of client data */ + vm_page_t pages; /* starting page of client data */ + vm_offset_t dataoffs; /* page offset of client data */ + bus_size_t datacount; /* client data count */ +}; + +int busdma_swi_pending; + +struct bounce_zone { + STAILQ_ENTRY(bounce_zone) links; + STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; + int total_bpages; + int free_bpages; + int reserved_bpages; + int active_bpages; + int total_bounced; + int total_deferred; + int map_count; + bus_size_t alignment; + bus_addr_t lowaddr; + char zoneid[8]; + char lowaddrid[20]; + struct sysctl_ctx_list sysctl_tree; + struct sysctl_oid *sysctl_tree_top; +}; + +static struct mtx bounce_lock; +static int total_bpages; +static int busdma_zonecount; +static uint32_t tags_total; +static uint32_t maps_total; +static uint32_t maps_dmamem; +static uint32_t maps_coherent; +static counter_u64_t maploads_total; +static counter_u64_t maploads_bounced; +static counter_u64_t maploads_coherent; +static counter_u64_t maploads_dmamem; +static counter_u64_t maploads_mbuf; +static counter_u64_t maploads_physmem; + +static STAILQ_HEAD(, bounce_zone) bounce_zone_list; + +SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); +SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, + "Number of active tags"); +SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, + "Number of active maps"); +SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, + "Number of active maps for 
bus_dmamem_alloc buffers"); +SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, + "Number of active maps with BUS_DMA_COHERENT flag set"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, + &maploads_total, "Number of load operations performed"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, + &maploads_bounced, "Number of load operations that used bounce buffers"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, + &maploads_dmamem, "Number of load operations on BUS_DMA_COHERENT memory"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, + &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, + &maploads_mbuf, "Number of load operations for mbufs"); +SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, + &maploads_physmem, "Number of load operations on physical buffers"); +SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, + "Total bounce pages"); + +struct bus_dmamap { + struct bp_list bpages; + int pagesneeded; + int pagesreserved; + bus_dma_tag_t dmat; + struct memdesc mem; + bus_dmamap_callback_t *callback; + void *callback_arg; + int flags; +#define DMAMAP_COHERENT (1 << 0) +#define DMAMAP_DMAMEM_ALLOC (1 << 1) +#define DMAMAP_MBUF (1 << 2) +#define DMAMAP_CACHE_ALIGNED (1 << 3) + STAILQ_ENTRY(bus_dmamap) links; + bus_dma_segment_t *segments; + int sync_count; + struct sync_list slist[]; +}; + +static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; +static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; + +static void init_bounce_pages(void *dummy); +static int alloc_bounce_zone(bus_dma_tag_t dmat); +static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); +static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, + int commit); +static bus_addr_t add_bounce_page(bus_dma_tag_t 
dmat, bus_dmamap_t map, + vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); +static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); +static void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op, + int bufaligned); + +/* + * ---------------------------------------------------------------------------- + * Begin block of code useful to transplant to other implementations. + */ + +static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ +static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ + +MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); +MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages"); + +static void +busdma_init(void *dummy) +{ + + maploads_total = counter_u64_alloc(M_WAITOK); + maploads_bounced = counter_u64_alloc(M_WAITOK); + maploads_coherent = counter_u64_alloc(M_WAITOK); + maploads_dmamem = counter_u64_alloc(M_WAITOK); + maploads_mbuf = counter_u64_alloc(M_WAITOK); + maploads_physmem = counter_u64_alloc(M_WAITOK); + + /* Create a cache of buffers in standard (cacheable) memory. */ + standard_allocator = busdma_bufalloc_create("buffer", + arm_dcache_align, /* minimum_alignment */ + NULL, /* uma_alloc func */ + NULL, /* uma_free func */ + 0); /* uma_zcreate_flags */ + + /* + * Create a cache of buffers in uncacheable memory, to implement the + * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. + */ + coherent_allocator = busdma_bufalloc_create("coherent", + arm_dcache_align, /* minimum_alignment */ + busdma_bufalloc_alloc_uncacheable, + busdma_bufalloc_free_uncacheable, + 0); /* uma_zcreate_flags */ +} + +/* + * This init historically used SI_SUB_VM, but now the init code requires + * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get + * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by + * using SI_SUB_KMEM+1. 
+ */ +SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); + +/* + * End block of code useful to transplant to other implementations. + * ---------------------------------------------------------------------------- + */ + +/* + * Return true if a match is made. + * + * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. + * + * If paddr is within the bounds of the dma tag then call the filter callback + * to check for a match, if there is no filter callback then assume a match. + */ +static int +run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) +{ + int retval; + + retval = 0; + + do { + if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) + || ((paddr & (dmat->alignment - 1)) != 0)) + && (dmat->filter == NULL + || (*dmat->filter)(dmat->filterarg, paddr) != 0)) + retval = 1; + + dmat = dmat->parent; + } while (retval == 0 && dmat != NULL); + return (retval); +} + +/* + * This routine checks the exclusion zone constraints from a tag against the + * physical RAM available on the machine. If a tag specifies an exclusion zone + * but there's no RAM in that zone, then we avoid allocating resources to bounce + * a request, and we can use any memory allocator (as opposed to needing + * kmem_alloc_contig() just because it can allocate pages in an address range). + * + * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the + * same value on 32-bit architectures) as their lowaddr constraint, and we can't + * possibly have RAM at an address higher than the highest address we can + * express, so we take a fast out. 
+ */ +static __inline int +_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) +{ + int i; + + if (lowaddr >= BUS_SPACE_MAXADDR) + return (0); + + for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { + if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) + || (lowaddr < phys_avail[i] && + highaddr > phys_avail[i])) + return (1); + } + return (0); +} + +static __inline struct arm32_dma_range * +_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, + bus_addr_t curaddr) +{ + struct arm32_dma_range *dr; + int i; + + for (i = 0, dr = ranges; i < nranges; i++, dr++) { + if (curaddr >= dr->dr_sysbase && + round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) + return (dr); + } + + return (NULL); +} + +/* + * Convenience function for manipulating driver locks from busdma (during + * busdma_swi, for example). Drivers that don't provide their own locks + * should specify &Giant to dmat->lockfuncarg. Drivers that use their own + * non-mutex locking scheme don't have to use this at all. + */ +void +busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) +{ + struct mtx *dmtx; + + dmtx = (struct mtx *)arg; + switch (op) { + case BUS_DMA_LOCK: + mtx_lock(dmtx); + break; + case BUS_DMA_UNLOCK: + mtx_unlock(dmtx); + break; + default: + panic("Unknown operation 0x%x for busdma_lock_mutex!", op); + } +} + +/* + * dflt_lock should never get called. It gets put into the dma tag when + * lockfunc == NULL, which is only valid if the maps that are associated + * with the tag are meant to never be defered. + * XXX Should have a way to identify which driver is responsible here. + */ +static void +dflt_lock(void *arg, bus_dma_lock_op_t op) +{ +#ifdef INVARIANTS + panic("driver error: busdma dflt_lock called"); +#else + printf("DRIVER_ERROR: busdma dflt_lock called\n"); +#endif +} + +/* + * Allocate a device specific dma_tag. 
+ */ +int +bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, + bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, + bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, + int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, + void *lockfuncarg, bus_dma_tag_t *dmat) +{ + bus_dma_tag_t newtag; + int error = 0; + /* Return a NULL tag on failure */ + *dmat = NULL; + + newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT); + if (newtag == NULL) { + CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", + __func__, newtag, 0, error); + return (ENOMEM); + } + + newtag->parent = parent; + newtag->alignment = alignment ? alignment : 1; + newtag->boundary = boundary; + newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); + newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); + newtag->filter = filter; + newtag->filterarg = filterarg; + newtag->maxsize = maxsize; + newtag->nsegments = nsegments; + newtag->maxsegsz = maxsegsz; + newtag->flags = flags; + newtag->ref_count = 1; /* Count ourself */ + newtag->map_count = 0; + newtag->ranges = bus_dma_get_range(); + newtag->_nranges = bus_dma_get_range_nb(); + if (lockfunc != NULL) { + newtag->lockfunc = lockfunc; + newtag->lockfuncarg = lockfuncarg; + } else { + newtag->lockfunc = dflt_lock; + newtag->lockfuncarg = NULL; + } + + /* Take into account any restrictions imposed by our parent tag */ + if (parent != NULL) { + newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); + newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); + if (newtag->boundary == 0) + newtag->boundary = parent->boundary; + else if (parent->boundary != 0) + newtag->boundary = MIN(parent->boundary, + newtag->boundary); + if ((newtag->filter != NULL) || + ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) + newtag->flags |= BUS_DMA_COULD_BOUNCE; + if (newtag->filter == NULL) { + /* + * Short circuit looking at our parent directly + * since we 
have encapsulated all of its information + */ + newtag->filter = parent->filter; + newtag->filterarg = parent->filterarg; + newtag->parent = parent->parent; + } + if (newtag->parent != NULL) + atomic_add_int(&parent->ref_count, 1); + } + if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) + || newtag->alignment > 1) + newtag->flags |= BUS_DMA_COULD_BOUNCE; + + if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && + (flags & BUS_DMA_ALLOCNOW) != 0) { + struct bounce_zone *bz; + + /* Must bounce */ + + if ((error = alloc_bounce_zone(newtag)) != 0) { + free(newtag, M_BUSDMA); + return (error); + } + bz = newtag->bounce_zone; + + if (ptoa(bz->total_bpages) < maxsize) { + int pages; + + pages = atop(maxsize) - bz->total_bpages; + + /* Add pages to our bounce pool */ + if (alloc_bounce_pages(newtag, pages) < pages) + error = ENOMEM; + } + /* Performed initial allocation */ + newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; + } else + newtag->bounce_zone = NULL; + + if (error != 0) { + free(newtag, M_BUSDMA); + } else { + atomic_add_32(&tags_total, 1); + *dmat = newtag; + } + CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", + __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); + return (error); +} + +int +bus_dma_tag_destroy(bus_dma_tag_t dmat) +{ + bus_dma_tag_t dmat_copy; + int error; + + error = 0; + dmat_copy = dmat; + + if (dmat != NULL) { + + if (dmat->map_count != 0) { + error = EBUSY; + goto out; + } + + while (dmat != NULL) { + bus_dma_tag_t parent; + + parent = dmat->parent; + atomic_subtract_int(&dmat->ref_count, 1); + if (dmat->ref_count == 0) { + atomic_subtract_32(&tags_total, 1); + free(dmat, M_BUSDMA); + /* + * Last reference count, so + * release our reference + * count on our parent. 
+ */ + dmat = parent; + } else + dmat = NULL; + } + } +out: + CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); + return (error); +} + +static int +allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + int error; + + /* + * Bouncing might be required if the driver asks for an active + * exclusion region, a data alignment that is stricter than 1, and/or + * an active address boundary. + */ + if (dmat->flags & BUS_DMA_COULD_BOUNCE) { + + /* Must bounce */ + struct bounce_zone *bz; + int maxpages; + + if (dmat->bounce_zone == NULL) { + if ((error = alloc_bounce_zone(dmat)) != 0) { + return (error); + } + } + bz = dmat->bounce_zone; + + /* Initialize the new map */ + STAILQ_INIT(&(map->bpages)); + + /* + * Attempt to add pages to our pool on a per-instance + * basis up to a sane limit. + */ + maxpages = MAX_BPAGES; + if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 + || (bz->map_count > 0 && bz->total_bpages < maxpages)) { + int pages; + + pages = MAX(atop(dmat->maxsize), 1); + pages = MIN(maxpages - bz->total_bpages, pages); + pages = MAX(pages, 1); + if (alloc_bounce_pages(dmat, pages) < pages) + return (ENOMEM); + + if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) + dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; + } + bz->map_count++; + } + return (0); +} + +static bus_dmamap_t +allocate_map(bus_dma_tag_t dmat, int mflags) +{ + int mapsize, segsize; + bus_dmamap_t map; + + /* + * Allocate the map. The map structure ends with an embedded + * variable-sized array of sync_list structures. Following that + * we allocate enough extra space to hold the array of bus_dma_segments. 
+ */ + KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, + ("cannot allocate %u dma segments (max is %u)", + dmat->nsegments, MAX_DMA_SEGMENTS)); + segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; + mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; + map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); + if (map == NULL) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); + return (NULL); + } + map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); + return (map); +} + +/* + * Allocate a handle for mapping from kva/uva/physical + * address space into bus device space. + */ +int +bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) +{ + bus_dmamap_t map; + int error = 0; + + *mapp = map = allocate_map(dmat, M_NOWAIT); + if (map == NULL) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); + return (ENOMEM); + } + + /* + * Bouncing might be required if the driver asks for an exclusion + * region, a data alignment that is stricter than 1, or DMA that begins + * or ends with a partial cacheline. Whether bouncing will actually + * happen can't be known until mapping time, but we need to pre-allocate + * resources now because we might not be allowed to at mapping time. + */ + error = allocate_bz_and_pages(dmat, map); + if (error != 0) { + free(map, M_BUSDMA); + *mapp = NULL; + return (error); + } + if (map->flags & DMAMAP_COHERENT) + atomic_add_32(&maps_coherent, 1); + atomic_add_32(&maps_total, 1); + dmat->map_count++; + + return (0); +} + +/* + * Destroy a handle for mapping from kva/uva/physical + * address space into bus device space. 
+ */ +int +bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + + if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { + CTR3(KTR_BUSDMA, "%s: tag %p error %d", + __func__, dmat, EBUSY); + return (EBUSY); + } + if (dmat->bounce_zone) + dmat->bounce_zone->map_count--; + if (map->flags & DMAMAP_COHERENT) + atomic_subtract_32(&maps_coherent, 1); + atomic_subtract_32(&maps_total, 1); + free(map, M_BUSDMA); + dmat->map_count--; + CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); + return (0); +} + +/* + * Allocate a piece of memory that can be efficiently mapped into bus device + * space based on the constraints listed in the dma tag. Returns a pointer to + * the allocated memory, and a pointer to an associated bus_dmamap. + */ +int +bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, + bus_dmamap_t *mapp) +{ + busdma_bufalloc_t ba; + struct busdma_bufzone *bufzone; + bus_dmamap_t map; + vm_memattr_t memattr; + int mflags; + + if (flags & BUS_DMA_NOWAIT) + mflags = M_NOWAIT; + else + mflags = M_WAITOK; + if (flags & BUS_DMA_ZERO) + mflags |= M_ZERO; + + *mapp = map = allocate_map(dmat, mflags); + if (map == NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->flags, ENOMEM); + return (ENOMEM); + } + map->flags = DMAMAP_DMAMEM_ALLOC; + + /* Choose a busdma buffer allocator based on memory type flags. */ + if (flags & BUS_DMA_COHERENT) { + memattr = VM_MEMATTR_UNCACHEABLE; + ba = coherent_allocator; + map->flags |= DMAMAP_COHERENT; + } else { + memattr = VM_MEMATTR_DEFAULT; + ba = standard_allocator; + } + + /* + * Try to find a bufzone in the allocator that holds a cache of buffers + * of the right size for this request. If the buffer is too big to be + * held in the allocator cache, this returns NULL. + */ + bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); + + /* + * Allocate the buffer from the uma(9) allocator if... + * - It's small enough to be in the allocator (bufzone not NULL). 
+ * - The alignment constraint isn't larger than the allocation size + * (the allocator aligns buffers to their size boundaries). + * - There's no need to handle lowaddr/highaddr exclusion zones. + * else allocate non-contiguous pages if... + * - The page count that could get allocated doesn't exceed nsegments. + * - The alignment constraint isn't larger than a page boundary. + * - There are no boundary-crossing constraints. + * else allocate a block of contiguous pages because one or more of the + * constraints is something that only the contig allocator can fulfill. + */ + if (bufzone != NULL && dmat->alignment <= bufzone->size && + !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) { + *vaddr = uma_zalloc(bufzone->umazone, mflags); + } else if (dmat->nsegments >= btoc(dmat->maxsize) && + dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { + *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, + mflags, 0, dmat->lowaddr, memattr); + } else { + *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, + mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, + memattr); + } + if (*vaddr == NULL) { + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->flags, ENOMEM); + free(map, M_BUSDMA); + *mapp = NULL; + return (ENOMEM); + } + if (map->flags & DMAMAP_COHERENT) + atomic_add_32(&maps_coherent, 1); + atomic_add_32(&maps_dmamem, 1); + atomic_add_32(&maps_total, 1); + dmat->map_count++; + + CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", + __func__, dmat, dmat->flags, 0); + return (0); +} + +/* + * Free a piece of memory that was allocated via bus_dmamem_alloc, along with + * its associated map. 
+ */ +void +bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) +{ + struct busdma_bufzone *bufzone; + busdma_bufalloc_t ba; + + if (map->flags & DMAMAP_COHERENT) + ba = coherent_allocator; + else + ba = standard_allocator; + + bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); + + if (bufzone != NULL && dmat->alignment <= bufzone->size && + !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) + uma_zfree(bufzone->umazone, vaddr); + else + kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); + + dmat->map_count--; + if (map->flags & DMAMAP_COHERENT) + atomic_subtract_32(&maps_coherent, 1); + atomic_subtract_32(&maps_total, 1); + atomic_subtract_32(&maps_dmamem, 1); + free(map, M_BUSDMA); + CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); +} + +static void +_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags) +{ + bus_addr_t curaddr; + bus_size_t sgsize; + + if (map->pagesneeded == 0) { + CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", + dmat->lowaddr, dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", + map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + curaddr = buf; + while (buflen != 0) { + sgsize = MIN(buflen, dmat->maxsegsz); + if (run_filter(dmat, curaddr) != 0) { + sgsize = MIN(sgsize, + PAGE_SIZE - (curaddr & PAGE_MASK)); + map->pagesneeded++; + } + curaddr += sgsize; + buflen -= sgsize; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static void +_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, + void *buf, bus_size_t buflen, int flags) +{ + vm_offset_t vaddr; + vm_offset_t vendaddr; + bus_addr_t paddr; + + if (map->pagesneeded == 0) { + CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", + dmat->lowaddr, dmat->boundary, dmat->alignment); + CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 
+ map, map->pagesneeded); + /* + * Count the number of bounce pages + * needed in order to complete this transfer + */ + vaddr = trunc_page((vm_offset_t)buf); + vendaddr = (vm_offset_t)buf + buflen; + + while (vaddr < vendaddr) { + if (__predict_true(pmap == kernel_pmap)) + paddr = pmap_kextract(vaddr); + else + paddr = pmap_extract(pmap, vaddr); + if (run_filter(dmat, paddr) != 0) + map->pagesneeded++; + vaddr += PAGE_SIZE; + } + CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); + } +} + +static int +_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) +{ + + /* Reserve Necessary Bounce Pages */ + mtx_lock(&bounce_lock); + if (flags & BUS_DMA_NOWAIT) { + if (reserve_bounce_pages(dmat, map, 0) != 0) { + mtx_unlock(&bounce_lock); + return (ENOMEM); + } + } else { + if (reserve_bounce_pages(dmat, map, 1) != 0) { + /* Queue us for resources */ + STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); + mtx_unlock(&bounce_lock); + return (EINPROGRESS); + } + } + mtx_unlock(&bounce_lock); + + return (0); +} + +/* + * Add a single contiguous physical range to the segment list. + */ +static int +_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, + bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t baddr, bmask; + int seg; + + /* + * Make sure we don't cross any boundaries. + */ + bmask = ~(dmat->boundary - 1); + if (dmat->boundary > 0) { + baddr = (curaddr + dmat->boundary) & bmask; + if (sgsize > (baddr - curaddr)) + sgsize = (baddr - curaddr); + } + if (dmat->ranges) { + struct arm32_dma_range *dr; + + dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, + curaddr); + if (dr == NULL) + return (0); + /* + * In a valid DMA range. Translate the physical + * memory address to an address in the DMA window. + */ + curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; + + } + + seg = *segp; + /* + * Insert chunk into a segment, coalescing with + * the previous segment if possible. 
+ */ + if (seg >= 0 && + curaddr == segs[seg].ds_addr + segs[seg].ds_len && + (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && + (dmat->boundary == 0 || + (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { + segs[seg].ds_len += sgsize; + } else { + if (++seg >= dmat->nsegments) + return (0); + segs[seg].ds_addr = curaddr; + segs[seg].ds_len = sgsize; + } + *segp = seg; + return (sgsize); +} + +/* + * Utility function to load a physical buffer. segp contains + * the starting segment on entrace, and the ending segment on exit. + */ +int +_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, + bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) +{ + bus_addr_t curaddr; + bus_addr_t sl_end = 0; + bus_size_t sgsize; + struct sync_list *sl; + int error; + + if (segs == NULL) + segs = map->segments; + + counter_u64_add(maploads_total, 1); + counter_u64_add(maploads_physmem, 1); + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); + if (map->pagesneeded != 0) { + counter_u64_add(maploads_bounced, 1); + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + + sl = map->slist + map->sync_count - 1; + + while (buflen > 0) { + curaddr = buf; + sgsize = MIN(buflen, dmat->maxsegsz); + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); + curaddr = add_bounce_page(dmat, map, 0, curaddr, + sgsize); + } else { + if (map->sync_count > 0) + sl_end = VM_PAGE_TO_PHYS(sl->pages) + + sl->dataoffs + sl->datacount; + + if (map->sync_count == 0 || curaddr != sl_end) { + if (++map->sync_count > dmat->nsegments) + break; + sl++; + sl->vaddr = 0; + sl->datacount = sgsize; + sl->pages = PHYS_TO_VM_PAGE(curaddr); + sl->dataoffs = curaddr & PAGE_MASK; + } else + sl->datacount += sgsize; + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 
+ segp); + if (sgsize == 0) + break; + buf += sgsize; + buflen -= sgsize; + } + + /* + * Did we fit? + */ + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? */ + } + return (0); +} + +int +_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, + struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, + bus_dma_segment_t *segs, int *segp) +{ + + return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, + segs, segp)); +} + +/* + * Utility function to load a linear buffer. segp contains + * the starting segment on entrance, and the ending segment on exit. + */ +int +_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, + bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, + int *segp) +{ + bus_size_t sgsize; + bus_addr_t curaddr; + bus_addr_t sl_pend = 0; + struct sync_list *sl; + vm_offset_t kvaddr; + vm_offset_t vaddr = (vm_offset_t)buf; + vm_offset_t sl_vend = 0; + int error = 0; + + counter_u64_add(maploads_total, 1); + if (map->flags & DMAMAP_COHERENT) + counter_u64_add(maploads_coherent, 1); + if (map->flags & DMAMAP_DMAMEM_ALLOC) + counter_u64_add(maploads_dmamem, 1); + + if (segs == NULL) + segs = map->segments; + if (flags & BUS_DMA_LOAD_MBUF) { + counter_u64_add(maploads_mbuf, 1); + map->flags |= DMAMAP_CACHE_ALIGNED; + } + + if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { + _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); + if (map->pagesneeded != 0) { + counter_u64_add(maploads_bounced, 1); + error = _bus_dmamap_reserve_pages(dmat, map, flags); + if (error) + return (error); + } + } + CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " + "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); + + sl = map->slist + map->sync_count - 1; + + while (buflen > 0) { + /* + * Get the physical address for this segment. 
+ */ + if (__predict_true(pmap == kernel_pmap)) { + curaddr = pmap_kextract(vaddr); + kvaddr = vaddr; + } else { + curaddr = pmap_extract(pmap, vaddr); + map->flags &= ~DMAMAP_COHERENT; + kvaddr = 0; + } + + /* + * Compute the segment size, and adjust counts. + */ + sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); + if (sgsize > dmat->maxsegsz) + sgsize = dmat->maxsegsz; + if (buflen < sgsize) + sgsize = buflen; + + if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && + map->pagesneeded != 0 && run_filter(dmat, curaddr)) { + curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, + sgsize); + } else { + if (map->sync_count > 0) { + sl_pend = VM_PAGE_TO_PHYS(sl->pages) + + sl->dataoffs + sl->datacount; + sl_vend = sl->vaddr + sl->datacount; + } + + if (map->sync_count == 0 || + (kvaddr != 0 && kvaddr != sl_vend) || + (kvaddr == 0 && curaddr != sl_pend)) { + + if (++map->sync_count > dmat->nsegments) + goto cleanup; + sl++; + sl->vaddr = kvaddr; + sl->datacount = sgsize; + sl->pages = PHYS_TO_VM_PAGE(curaddr); + sl->dataoffs = curaddr & PAGE_MASK; + } else + sl->datacount += sgsize; + } + sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, + segp); + if (sgsize == 0) + break; + vaddr += sgsize; + buflen -= sgsize; + } + +cleanup: + /* + * Did we fit? + */ + if (buflen != 0) { + _bus_dmamap_unload(dmat, map); + return (EFBIG); /* XXX better return value here? */ + } + return (0); +} + +void +__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, + bus_dmamap_callback_t *callback, void *callback_arg) +{ + + KASSERT(dmat != NULL, ("dmatag is NULL")); + KASSERT(map != NULL, ("dmamap is NULL")); + map->mem = *mem; + map->callback = callback; + map->callback_arg = callback_arg; +} + +bus_dma_segment_t * +_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, + bus_dma_segment_t *segs, int nsegs, int error) +{ + + if (segs == NULL) + segs = map->segments; + return (segs); +} + +/* + * Release the mapping held by map. 
+ */ +void +_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) +{ + struct bounce_page *bpage; + struct bounce_zone *bz; + + if ((bz = dmat->bounce_zone) != NULL) { + while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { + STAILQ_REMOVE_HEAD(&map->bpages, links); + free_bounce_page(dmat, bpage); + } + + bz = dmat->bounce_zone; + bz->free_bpages += map->pagesreserved; + bz->reserved_bpages -= map->pagesreserved; + map->pagesreserved = 0; + map->pagesneeded = 0; + } + map->sync_count = 0; + map->flags &= ~DMAMAP_MBUF; +} + +static void +bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, + int bufaligned) +{ + char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align]; + register_t s; + int partial; + + if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) { + cpu_dcache_wb_range(buf, len); + cpu_l2cache_wb_range(buf, len); + } + + /* + * If the caller promises the buffer is properly aligned to a cache line + * (even if the call parms make it look like it isn't) we can avoid + * attempting to preserve the non-DMA part of the cache line in the + * POSTREAD case, but we MUST still do a writeback in the PREREAD case. + * + * This covers the case of mbufs, where we know how they're aligned and + * know the CPU doesn't touch the header in front of the DMA data area + * during the IO, but it may have touched it right before invoking the + * sync, so a PREREAD writeback is required. + * + * It also handles buffers we created in bus_dmamem_alloc(), which are + * always aligned and padded to cache line size even if the IO length + * isn't a multiple of cache line size. In this case the PREREAD + * writeback probably isn't required, but it's harmless. 
+ */ + partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask; + + if (op & BUS_DMASYNC_PREREAD) { + if (!(op & BUS_DMASYNC_PREWRITE) && !partial) { + cpu_dcache_inv_range(buf, len); + cpu_l2cache_inv_range(buf, len); + } else { + cpu_dcache_wbinv_range(buf, len); + cpu_l2cache_wbinv_range(buf, len); + } + } + if (op & BUS_DMASYNC_POSTREAD) { + if (partial && !bufaligned) { + s = intr_disable(); + if (buf & arm_dcache_align_mask) + memcpy(_tmp_cl, (void *)(buf & + ~arm_dcache_align_mask), + buf & arm_dcache_align_mask); + if ((buf + len) & arm_dcache_align_mask) + memcpy(_tmp_clend, + (void *)(buf + len), + arm_dcache_align - + ((buf + len) & arm_dcache_align_mask)); + } + cpu_dcache_inv_range(buf, len); + cpu_l2cache_inv_range(buf, len); + if (partial && !bufaligned) { + if (buf & arm_dcache_align_mask) + memcpy((void *)(buf & + ~arm_dcache_align_mask), _tmp_cl, + buf & arm_dcache_align_mask); + if ((buf + len) & arm_dcache_align_mask) + memcpy((void *)(buf + len), + _tmp_clend, arm_dcache_align - + ((buf + len) & arm_dcache_align_mask)); + intr_restore(s); + } + } +} + +static void +bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op, + int bufaligned) +{ + vm_offset_t tempvaddr; + vm_page_t curpage; + size_t npages; + + if (sl->vaddr != 0) { + bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned); + return; + } + + tempvaddr = 0; + npages = atop(round_page(sl->dataoffs + sl->datacount)); + + for (curpage = sl->pages; curpage != sl->pages + npages; ++curpage) { + /* + * If the page is mapped to some other VA that hasn't + * been supplied to busdma, then pmap_quick_enter_page() + * will find all duplicate mappings and mark them + * uncacheable. + * That will also do any necessary wb/inv. Otherwise, + * if the page is truly unmapped, then we don't actually + * need to do cache maintenance. + * XXX: May overwrite DMA'ed data in the POSTREAD + * case where the CPU has written to a cacheline not + * completely covered by the DMA region. 
+ */ + KASSERT(VM_PAGE_TO_PHYS(curpage) == VM_PAGE_TO_PHYS(sl->pages) + + ptoa(curpage - sl->pages), + ("unexpected vm_page_t phys: 0x%08x != 0x%08x", + VM_PAGE_TO_PHYS(curpage), VM_PAGE_TO_PHYS(sl->pages) + + ptoa(curpage - sl->pages))); + tempvaddr = pmap_quick_enter_page(curpage); + pmap_quick_remove_page(tempvaddr); + } +} + +static void +_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) +{ + struct bounce_page *bpage; + vm_offset_t datavaddr, tempvaddr; + + if ((op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) == 0) + return; + + STAILQ_FOREACH(bpage, &map->bpages, links) { + tempvaddr = 0; + datavaddr = bpage->datavaddr; + if (op & BUS_DMASYNC_PREWRITE) { + if (datavaddr == 0) { + tempvaddr = + pmap_quick_enter_page(bpage->datapage); + datavaddr = tempvaddr | bpage->dataoffs; + } + bcopy((void *)datavaddr, + (void *)bpage->vaddr, bpage->datacount); + if (tempvaddr != 0) + pmap_quick_remove_page(tempvaddr); + cpu_dcache_wb_range(bpage->vaddr, bpage->datacount); + cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount); + dmat->bounce_zone->total_bounced++; + } + if (op & BUS_DMASYNC_POSTREAD) { + cpu_dcache_inv_range(bpage->vaddr, bpage->datacount); + cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount); + if (datavaddr == 0) { + tempvaddr = + pmap_quick_enter_page(bpage->datapage); + datavaddr = tempvaddr | bpage->dataoffs; + } + bcopy((void *)bpage->vaddr, + (void *)datavaddr, bpage->datacount); + if (tempvaddr != 0) + pmap_quick_remove_page(tempvaddr); + dmat->bounce_zone->total_bounced++; + } + } +} + +void +_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) +{ + struct sync_list *sl, *end; + int bufaligned; + + if (op == BUS_DMASYNC_POSTWRITE) + return; + if (map->flags & DMAMAP_COHERENT) + goto drain; + if (STAILQ_FIRST(&map->bpages)) + _bus_dmamap_sync_bp(dmat, map, op); + CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); + bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED); + if 
(map->sync_count) { + end = &map->slist[map->sync_count]; + for (sl = &map->slist[0]; sl != end; sl++) + bus_dmamap_sync_sl(sl, op, bufaligned); + } + +drain: + + cpu_drain_writebuf(); +} + +static void +init_bounce_pages(void *dummy __unused) +{ + + total_bpages = 0; + STAILQ_INIT(&bounce_zone_list); + STAILQ_INIT(&bounce_map_waitinglist); + STAILQ_INIT(&bounce_map_callbacklist); + mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); +} +SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); + +static struct sysctl_ctx_list * +busdma_sysctl_tree(struct bounce_zone *bz) +{ + + return (&bz->sysctl_tree); +} + +static struct sysctl_oid * +busdma_sysctl_tree_top(struct bounce_zone *bz) +{ + + return (bz->sysctl_tree_top); +} + +static int +alloc_bounce_zone(bus_dma_tag_t dmat) +{ + struct bounce_zone *bz; + + /* Check to see if we already have a suitable zone */ + STAILQ_FOREACH(bz, &bounce_zone_list, links) { + if ((dmat->alignment <= bz->alignment) && + (dmat->lowaddr >= bz->lowaddr)) { + dmat->bounce_zone = bz; + return (0); + } + } + + if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA, + M_NOWAIT | M_ZERO)) == NULL) + return (ENOMEM); + + STAILQ_INIT(&bz->bounce_page_list); + bz->free_bpages = 0; + bz->reserved_bpages = 0; + bz->active_bpages = 0; + bz->lowaddr = dmat->lowaddr; + bz->alignment = MAX(dmat->alignment, PAGE_SIZE); + bz->map_count = 0; + snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); + busdma_zonecount++; + snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); + STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); + dmat->bounce_zone = bz; + + sysctl_ctx_init(&bz->sysctl_tree); + bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, + SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, + CTLFLAG_RD, 0, ""); + if (bz->sysctl_tree_top == NULL) { + sysctl_ctx_free(&bz->sysctl_tree); + return (0); /* XXX error code? 
*/ + } + + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, + "Total bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, + "Free bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, + "Reserved bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, + "Active bounce pages"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, + "Total bounce requests (pages bounced)"); + SYSCTL_ADD_INT(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, + "Total bounce requests that were deferred"); + SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); + SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz), + SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, + "alignment", CTLFLAG_RD, &bz->alignment, ""); + + return (0); +} + +static int +alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) +{ + struct bounce_zone *bz; + int count; + + bz = dmat->bounce_zone; + count = 0; + while (numpages > 0) { + struct bounce_page *bpage; + + bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA, + M_NOWAIT | M_ZERO); + + if (bpage == NULL) + break; + bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE, + M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); + if (bpage->vaddr == 0) { + free(bpage, M_BUSDMA); + break; + } + bpage->busaddr = pmap_kextract(bpage->vaddr); + 
mtx_lock(&bounce_lock); + STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); + total_bpages++; + bz->total_bpages++; + bz->free_bpages++; + mtx_unlock(&bounce_lock); + count++; + numpages--; + } + return (count); +} + +static int +reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) +{ + struct bounce_zone *bz; + int pages; + + mtx_assert(&bounce_lock, MA_OWNED); + bz = dmat->bounce_zone; + pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); + if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) + return (map->pagesneeded - (map->pagesreserved + pages)); + bz->free_bpages -= pages; + bz->reserved_bpages += pages; + map->pagesreserved += pages; + pages = map->pagesneeded - map->pagesreserved; + + return (pages); +} + +static bus_addr_t +add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, + bus_addr_t addr, bus_size_t size) +{ + struct bounce_zone *bz; + struct bounce_page *bpage; + + KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); + KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); + + bz = dmat->bounce_zone; + if (map->pagesneeded == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesneeded--; + + if (map->pagesreserved == 0) + panic("add_bounce_page: map doesn't need any pages"); + map->pagesreserved--; + + mtx_lock(&bounce_lock); + bpage = STAILQ_FIRST(&bz->bounce_page_list); + if (bpage == NULL) + panic("add_bounce_page: free page list is empty"); + + STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); + bz->reserved_bpages--; + bz->active_bpages++; + mtx_unlock(&bounce_lock); + + if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { + /* Page offset needs to be preserved. 
*/ + bpage->vaddr |= addr & PAGE_MASK; + bpage->busaddr |= addr & PAGE_MASK; + } + bpage->datavaddr = vaddr; + bpage->datapage = PHYS_TO_VM_PAGE(addr); + bpage->dataoffs = addr & PAGE_MASK; + bpage->datacount = size; + STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); + return (bpage->busaddr); +} + +static void +free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) +{ + struct bus_dmamap *map; + struct bounce_zone *bz; + + bz = dmat->bounce_zone; + bpage->datavaddr = 0; + bpage->datacount = 0; + if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { + /* + * Reset the bounce page to start at offset 0. Other uses + * of this bounce page may need to store a full page of + * data and/or assume it starts on a page boundary. + */ + bpage->vaddr &= ~PAGE_MASK; + bpage->busaddr &= ~PAGE_MASK; + } + + mtx_lock(&bounce_lock); + STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); + bz->free_bpages++; + bz->active_bpages--; + if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { + if (reserve_bounce_pages(map->dmat, map, 1) == 0) { + STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); + STAILQ_INSERT_TAIL(&bounce_map_callbacklist, + map, links); + busdma_swi_pending = 1; + bz->total_deferred++; + swi_sched(vm_ih, 0); + } + } + mtx_unlock(&bounce_lock); +} + +void +busdma_swi(void) +{ + bus_dma_tag_t dmat; + struct bus_dmamap *map; + + mtx_lock(&bounce_lock); + while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { + STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); + mtx_unlock(&bounce_lock); + dmat = map->dmat; + dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); + bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, + map->callback_arg, BUS_DMA_WAITOK); + dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); + mtx_lock(&bounce_lock); + } + mtx_unlock(&bounce_lock); +} diff --git a/sys/arm/arm/busdma_machdep.c b/sys/arm/arm/busdma_machdep.c deleted file mode 100644 index f7e0e261a097..000000000000 --- a/sys/arm/arm/busdma_machdep.c +++ /dev/null 
@@ -1,1609 +0,0 @@ -/*- - * Copyright (c) 2012 Ian Lepore - * Copyright (c) 2004 Olivier Houchard - * Copyright (c) 2002 Peter Grehan - * Copyright (c) 1997, 1998 Justin T. Gibbs. - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions, and the following disclaimer, - * without modification, immediately at the beginning of the file. - * 2. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR - * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * From i386/busdma_machdep.c,v 1.26 2002/04/19 22:58:09 alfred - */ - -#include -__FBSDID("$FreeBSD$"); - -/* - * ARM bus dma support routines. - * - * XXX Things to investigate / fix some day... - * - What is the earliest that this API can be called? Could there be any - * fallout from changing the SYSINIT() order from SI_SUB_VM to SI_SUB_KMEM? - * - The manpage mentions the BUS_DMA_NOWAIT flag only in the context of the - * bus_dmamap_load() function. 
This code has historically (and still does) - * honor it in bus_dmamem_alloc(). If we got rid of that we could lose some - * error checking because some resource management calls would become WAITOK - * and thus "cannot fail." - * - The decisions made by _bus_dma_can_bounce() should be made once, at tag - * creation time, and the result stored in the tag. - * - It should be possible to take some shortcuts when mapping a buffer we know - * came from the uma(9) allocators based on what we know about such buffers - * (aligned, contiguous, etc). - * - The allocation of bounce pages could probably be cleaned up, then we could - * retire arm_remap_nocache(). - */ - -#define _ARM32_BUS_DMA_PRIVATE -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#define MAX_BPAGES 64 -#define MAX_DMA_SEGMENTS 4096 -#define BUS_DMA_COULD_BOUNCE BUS_DMA_BUS3 -#define BUS_DMA_MIN_ALLOC_COMP BUS_DMA_BUS4 - -struct bounce_zone; - -struct bus_dma_tag { - bus_dma_tag_t parent; - bus_size_t alignment; - bus_addr_t boundary; - bus_addr_t lowaddr; - bus_addr_t highaddr; - bus_dma_filter_t *filter; - void *filterarg; - bus_size_t maxsize; - u_int nsegments; - bus_size_t maxsegsz; - int flags; - int ref_count; - int map_count; - bus_dma_lock_t *lockfunc; - void *lockfuncarg; - struct bounce_zone *bounce_zone; - /* - * DMA range for this tag. If the page doesn't fall within - * one of these ranges, an error is returned. The caller - * may then decide what to do with the transfer. If the - * range pointer is NULL, it is ignored. 
- */ - struct arm32_dma_range *ranges; - int _nranges; -}; - -struct bounce_page { - vm_offset_t vaddr; /* kva of bounce buffer */ - bus_addr_t busaddr; /* Physical address */ - vm_offset_t datavaddr; /* kva of client data */ - vm_page_t datapage; /* physical page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ - STAILQ_ENTRY(bounce_page) links; -}; - -struct sync_list { - vm_offset_t vaddr; /* kva of client data */ - vm_page_t pages; /* starting page of client data */ - vm_offset_t dataoffs; /* page offset of client data */ - bus_size_t datacount; /* client data count */ -}; - -int busdma_swi_pending; - -struct bounce_zone { - STAILQ_ENTRY(bounce_zone) links; - STAILQ_HEAD(bp_list, bounce_page) bounce_page_list; - int total_bpages; - int free_bpages; - int reserved_bpages; - int active_bpages; - int total_bounced; - int total_deferred; - int map_count; - bus_size_t alignment; - bus_addr_t lowaddr; - char zoneid[8]; - char lowaddrid[20]; - struct sysctl_ctx_list sysctl_tree; - struct sysctl_oid *sysctl_tree_top; -}; - -static struct mtx bounce_lock; -static int total_bpages; -static int busdma_zonecount; -static uint32_t tags_total; -static uint32_t maps_total; -static uint32_t maps_dmamem; -static uint32_t maps_coherent; -static counter_u64_t maploads_total; -static counter_u64_t maploads_bounced; -static counter_u64_t maploads_coherent; -static counter_u64_t maploads_dmamem; -static counter_u64_t maploads_mbuf; -static counter_u64_t maploads_physmem; - -static STAILQ_HEAD(, bounce_zone) bounce_zone_list; - -SYSCTL_NODE(_hw, OID_AUTO, busdma, CTLFLAG_RD, 0, "Busdma parameters"); -SYSCTL_UINT(_hw_busdma, OID_AUTO, tags_total, CTLFLAG_RD, &tags_total, 0, - "Number of active tags"); -SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_total, CTLFLAG_RD, &maps_total, 0, - "Number of active maps"); -SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_dmamem, CTLFLAG_RD, &maps_dmamem, 0, - "Number of active maps for 
bus_dmamem_alloc buffers"); -SYSCTL_UINT(_hw_busdma, OID_AUTO, maps_coherent, CTLFLAG_RD, &maps_coherent, 0, - "Number of active maps with BUS_DMA_COHERENT flag set"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_total, CTLFLAG_RD, - &maploads_total, "Number of load operations performed"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_bounced, CTLFLAG_RD, - &maploads_bounced, "Number of load operations that used bounce buffers"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_coherent, CTLFLAG_RD, - &maploads_dmamem, "Number of load operations on BUS_DMA_COHERENT memory"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_dmamem, CTLFLAG_RD, - &maploads_dmamem, "Number of load operations on bus_dmamem_alloc buffers"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_mbuf, CTLFLAG_RD, - &maploads_mbuf, "Number of load operations for mbufs"); -SYSCTL_COUNTER_U64(_hw_busdma, OID_AUTO, maploads_physmem, CTLFLAG_RD, - &maploads_physmem, "Number of load operations on physical buffers"); -SYSCTL_INT(_hw_busdma, OID_AUTO, total_bpages, CTLFLAG_RD, &total_bpages, 0, - "Total bounce pages"); - -struct bus_dmamap { - struct bp_list bpages; - int pagesneeded; - int pagesreserved; - bus_dma_tag_t dmat; - struct memdesc mem; - bus_dmamap_callback_t *callback; - void *callback_arg; - int flags; -#define DMAMAP_COHERENT (1 << 0) -#define DMAMAP_DMAMEM_ALLOC (1 << 1) -#define DMAMAP_MBUF (1 << 2) -#define DMAMAP_CACHE_ALIGNED (1 << 3) - STAILQ_ENTRY(bus_dmamap) links; - bus_dma_segment_t *segments; - int sync_count; - struct sync_list slist[]; -}; - -static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist; -static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist; - -static void init_bounce_pages(void *dummy); -static int alloc_bounce_zone(bus_dma_tag_t dmat); -static int alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages); -static int reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, - int commit); -static bus_addr_t add_bounce_page(bus_dma_tag_t 
dmat, bus_dmamap_t map, - vm_offset_t vaddr, bus_addr_t addr, bus_size_t size); -static void free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage); -static void bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op, - int bufaligned); - -/* - * ---------------------------------------------------------------------------- - * Begin block of code useful to transplant to other implementations. - */ - -static busdma_bufalloc_t coherent_allocator; /* Cache of coherent buffers */ -static busdma_bufalloc_t standard_allocator; /* Cache of standard buffers */ - -MALLOC_DEFINE(M_BUSDMA, "busdma", "busdma metadata"); -MALLOC_DEFINE(M_BOUNCE, "bounce", "busdma bounce pages"); - -static void -busdma_init(void *dummy) -{ - - maploads_total = counter_u64_alloc(M_WAITOK); - maploads_bounced = counter_u64_alloc(M_WAITOK); - maploads_coherent = counter_u64_alloc(M_WAITOK); - maploads_dmamem = counter_u64_alloc(M_WAITOK); - maploads_mbuf = counter_u64_alloc(M_WAITOK); - maploads_physmem = counter_u64_alloc(M_WAITOK); - - /* Create a cache of buffers in standard (cacheable) memory. */ - standard_allocator = busdma_bufalloc_create("buffer", - arm_dcache_align, /* minimum_alignment */ - NULL, /* uma_alloc func */ - NULL, /* uma_free func */ - 0); /* uma_zcreate_flags */ - - /* - * Create a cache of buffers in uncacheable memory, to implement the - * BUS_DMA_COHERENT (and potentially BUS_DMA_NOCACHE) flag. - */ - coherent_allocator = busdma_bufalloc_create("coherent", - arm_dcache_align, /* minimum_alignment */ - busdma_bufalloc_alloc_uncacheable, - busdma_bufalloc_free_uncacheable, - 0); /* uma_zcreate_flags */ -} - -/* - * This init historically used SI_SUB_VM, but now the init code requires - * malloc(9) using M_BUSDMA memory and the pcpu zones for counter(9), which get - * set up by SI_SUB_KMEM and SI_ORDER_LAST, so we'll go right after that by - * using SI_SUB_KMEM+1. 
- */ -SYSINIT(busdma, SI_SUB_KMEM+1, SI_ORDER_FIRST, busdma_init, NULL); - -/* - * End block of code useful to transplant to other implementations. - * ---------------------------------------------------------------------------- - */ - -/* - * Return true if a match is made. - * - * To find a match walk the chain of bus_dma_tag_t's looking for 'paddr'. - * - * If paddr is within the bounds of the dma tag then call the filter callback - * to check for a match, if there is no filter callback then assume a match. - */ -static int -run_filter(bus_dma_tag_t dmat, bus_addr_t paddr) -{ - int retval; - - retval = 0; - - do { - if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr) - || ((paddr & (dmat->alignment - 1)) != 0)) - && (dmat->filter == NULL - || (*dmat->filter)(dmat->filterarg, paddr) != 0)) - retval = 1; - - dmat = dmat->parent; - } while (retval == 0 && dmat != NULL); - return (retval); -} - -/* - * This routine checks the exclusion zone constraints from a tag against the - * physical RAM available on the machine. If a tag specifies an exclusion zone - * but there's no RAM in that zone, then we avoid allocating resources to bounce - * a request, and we can use any memory allocator (as opposed to needing - * kmem_alloc_contig() just because it can allocate pages in an address range). - * - * Most tags have BUS_SPACE_MAXADDR or BUS_SPACE_MAXADDR_32BIT (they are the - * same value on 32-bit architectures) as their lowaddr constraint, and we can't - * possibly have RAM at an address higher than the highest address we can - * express, so we take a fast out. 
- */ -static __inline int -_bus_dma_can_bounce(vm_offset_t lowaddr, vm_offset_t highaddr) -{ - int i; - - if (lowaddr >= BUS_SPACE_MAXADDR) - return (0); - - for (i = 0; phys_avail[i] && phys_avail[i + 1]; i += 2) { - if ((lowaddr >= phys_avail[i] && lowaddr <= phys_avail[i + 1]) - || (lowaddr < phys_avail[i] && - highaddr > phys_avail[i])) - return (1); - } - return (0); -} - -static __inline struct arm32_dma_range * -_bus_dma_inrange(struct arm32_dma_range *ranges, int nranges, - bus_addr_t curaddr) -{ - struct arm32_dma_range *dr; - int i; - - for (i = 0, dr = ranges; i < nranges; i++, dr++) { - if (curaddr >= dr->dr_sysbase && - round_page(curaddr) <= (dr->dr_sysbase + dr->dr_len)) - return (dr); - } - - return (NULL); -} - -/* - * Convenience function for manipulating driver locks from busdma (during - * busdma_swi, for example). Drivers that don't provide their own locks - * should specify &Giant to dmat->lockfuncarg. Drivers that use their own - * non-mutex locking scheme don't have to use this at all. - */ -void -busdma_lock_mutex(void *arg, bus_dma_lock_op_t op) -{ - struct mtx *dmtx; - - dmtx = (struct mtx *)arg; - switch (op) { - case BUS_DMA_LOCK: - mtx_lock(dmtx); - break; - case BUS_DMA_UNLOCK: - mtx_unlock(dmtx); - break; - default: - panic("Unknown operation 0x%x for busdma_lock_mutex!", op); - } -} - -/* - * dflt_lock should never get called. It gets put into the dma tag when - * lockfunc == NULL, which is only valid if the maps that are associated - * with the tag are meant to never be defered. - * XXX Should have a way to identify which driver is responsible here. - */ -static void -dflt_lock(void *arg, bus_dma_lock_op_t op) -{ -#ifdef INVARIANTS - panic("driver error: busdma dflt_lock called"); -#else - printf("DRIVER_ERROR: busdma dflt_lock called\n"); -#endif -} - -/* - * Allocate a device specific dma_tag. 
- */ -int -bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment, - bus_addr_t boundary, bus_addr_t lowaddr, bus_addr_t highaddr, - bus_dma_filter_t *filter, void *filterarg, bus_size_t maxsize, - int nsegments, bus_size_t maxsegsz, int flags, bus_dma_lock_t *lockfunc, - void *lockfuncarg, bus_dma_tag_t *dmat) -{ - bus_dma_tag_t newtag; - int error = 0; - /* Return a NULL tag on failure */ - *dmat = NULL; - - newtag = (bus_dma_tag_t)malloc(sizeof(*newtag), M_BUSDMA, M_NOWAIT); - if (newtag == NULL) { - CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", - __func__, newtag, 0, error); - return (ENOMEM); - } - - newtag->parent = parent; - newtag->alignment = alignment ? alignment : 1; - newtag->boundary = boundary; - newtag->lowaddr = trunc_page((vm_offset_t)lowaddr) + (PAGE_SIZE - 1); - newtag->highaddr = trunc_page((vm_offset_t)highaddr) + (PAGE_SIZE - 1); - newtag->filter = filter; - newtag->filterarg = filterarg; - newtag->maxsize = maxsize; - newtag->nsegments = nsegments; - newtag->maxsegsz = maxsegsz; - newtag->flags = flags; - newtag->ref_count = 1; /* Count ourself */ - newtag->map_count = 0; - newtag->ranges = bus_dma_get_range(); - newtag->_nranges = bus_dma_get_range_nb(); - if (lockfunc != NULL) { - newtag->lockfunc = lockfunc; - newtag->lockfuncarg = lockfuncarg; - } else { - newtag->lockfunc = dflt_lock; - newtag->lockfuncarg = NULL; - } - - /* Take into account any restrictions imposed by our parent tag */ - if (parent != NULL) { - newtag->lowaddr = MIN(parent->lowaddr, newtag->lowaddr); - newtag->highaddr = MAX(parent->highaddr, newtag->highaddr); - if (newtag->boundary == 0) - newtag->boundary = parent->boundary; - else if (parent->boundary != 0) - newtag->boundary = MIN(parent->boundary, - newtag->boundary); - if ((newtag->filter != NULL) || - ((parent->flags & BUS_DMA_COULD_BOUNCE) != 0)) - newtag->flags |= BUS_DMA_COULD_BOUNCE; - if (newtag->filter == NULL) { - /* - * Short circuit looking at our parent directly - * since we 
have encapsulated all of its information - */ - newtag->filter = parent->filter; - newtag->filterarg = parent->filterarg; - newtag->parent = parent->parent; - } - if (newtag->parent != NULL) - atomic_add_int(&parent->ref_count, 1); - } - if (_bus_dma_can_bounce(newtag->lowaddr, newtag->highaddr) - || newtag->alignment > 1) - newtag->flags |= BUS_DMA_COULD_BOUNCE; - - if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) && - (flags & BUS_DMA_ALLOCNOW) != 0) { - struct bounce_zone *bz; - - /* Must bounce */ - - if ((error = alloc_bounce_zone(newtag)) != 0) { - free(newtag, M_BUSDMA); - return (error); - } - bz = newtag->bounce_zone; - - if (ptoa(bz->total_bpages) < maxsize) { - int pages; - - pages = atop(maxsize) - bz->total_bpages; - - /* Add pages to our bounce pool */ - if (alloc_bounce_pages(newtag, pages) < pages) - error = ENOMEM; - } - /* Performed initial allocation */ - newtag->flags |= BUS_DMA_MIN_ALLOC_COMP; - } else - newtag->bounce_zone = NULL; - - if (error != 0) { - free(newtag, M_BUSDMA); - } else { - atomic_add_32(&tags_total, 1); - *dmat = newtag; - } - CTR4(KTR_BUSDMA, "%s returned tag %p tag flags 0x%x error %d", - __func__, newtag, (newtag != NULL ? newtag->flags : 0), error); - return (error); -} - -int -bus_dma_tag_destroy(bus_dma_tag_t dmat) -{ - bus_dma_tag_t dmat_copy; - int error; - - error = 0; - dmat_copy = dmat; - - if (dmat != NULL) { - - if (dmat->map_count != 0) { - error = EBUSY; - goto out; - } - - while (dmat != NULL) { - bus_dma_tag_t parent; - - parent = dmat->parent; - atomic_subtract_int(&dmat->ref_count, 1); - if (dmat->ref_count == 0) { - atomic_subtract_32(&tags_total, 1); - free(dmat, M_BUSDMA); - /* - * Last reference count, so - * release our reference - * count on our parent. 
- */ - dmat = parent; - } else - dmat = NULL; - } - } -out: - CTR3(KTR_BUSDMA, "%s tag %p error %d", __func__, dmat_copy, error); - return (error); -} - -static int -allocate_bz_and_pages(bus_dma_tag_t dmat, bus_dmamap_t map) -{ - int error; - - /* - * Bouncing might be required if the driver asks for an active - * exclusion region, a data alignment that is stricter than 1, and/or - * an active address boundary. - */ - if (dmat->flags & BUS_DMA_COULD_BOUNCE) { - - /* Must bounce */ - struct bounce_zone *bz; - int maxpages; - - if (dmat->bounce_zone == NULL) { - if ((error = alloc_bounce_zone(dmat)) != 0) { - return (error); - } - } - bz = dmat->bounce_zone; - - /* Initialize the new map */ - STAILQ_INIT(&(map->bpages)); - - /* - * Attempt to add pages to our pool on a per-instance - * basis up to a sane limit. - */ - maxpages = MAX_BPAGES; - if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0 - || (bz->map_count > 0 && bz->total_bpages < maxpages)) { - int pages; - - pages = MAX(atop(dmat->maxsize), 1); - pages = MIN(maxpages - bz->total_bpages, pages); - pages = MAX(pages, 1); - if (alloc_bounce_pages(dmat, pages) < pages) - return (ENOMEM); - - if ((dmat->flags & BUS_DMA_MIN_ALLOC_COMP) == 0) - dmat->flags |= BUS_DMA_MIN_ALLOC_COMP; - } - bz->map_count++; - } - return (0); -} - -static bus_dmamap_t -allocate_map(bus_dma_tag_t dmat, int mflags) -{ - int mapsize, segsize; - bus_dmamap_t map; - - /* - * Allocate the map. The map structure ends with an embedded - * variable-sized array of sync_list structures. Following that - * we allocate enough extra space to hold the array of bus_dma_segments. 
- */ - KASSERT(dmat->nsegments <= MAX_DMA_SEGMENTS, - ("cannot allocate %u dma segments (max is %u)", - dmat->nsegments, MAX_DMA_SEGMENTS)); - segsize = sizeof(struct bus_dma_segment) * dmat->nsegments; - mapsize = sizeof(*map) + sizeof(struct sync_list) * dmat->nsegments; - map = malloc(mapsize + segsize, M_BUSDMA, mflags | M_ZERO); - if (map == NULL) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); - return (NULL); - } - map->segments = (bus_dma_segment_t *)((uintptr_t)map + mapsize); - return (map); -} - -/* - * Allocate a handle for mapping from kva/uva/physical - * address space into bus device space. - */ -int -bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp) -{ - bus_dmamap_t map; - int error = 0; - - *mapp = map = allocate_map(dmat, M_NOWAIT); - if (map == NULL) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", __func__, dmat, ENOMEM); - return (ENOMEM); - } - - /* - * Bouncing might be required if the driver asks for an exclusion - * region, a data alignment that is stricter than 1, or DMA that begins - * or ends with a partial cacheline. Whether bouncing will actually - * happen can't be known until mapping time, but we need to pre-allocate - * resources now because we might not be allowed to at mapping time. - */ - error = allocate_bz_and_pages(dmat, map); - if (error != 0) { - free(map, M_BUSDMA); - *mapp = NULL; - return (error); - } - if (map->flags & DMAMAP_COHERENT) - atomic_add_32(&maps_coherent, 1); - atomic_add_32(&maps_total, 1); - dmat->map_count++; - - return (0); -} - -/* - * Destroy a handle for mapping from kva/uva/physical - * address space into bus device space. 
- */ -int -bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map) -{ - - if (STAILQ_FIRST(&map->bpages) != NULL || map->sync_count != 0) { - CTR3(KTR_BUSDMA, "%s: tag %p error %d", - __func__, dmat, EBUSY); - return (EBUSY); - } - if (dmat->bounce_zone) - dmat->bounce_zone->map_count--; - if (map->flags & DMAMAP_COHERENT) - atomic_subtract_32(&maps_coherent, 1); - atomic_subtract_32(&maps_total, 1); - free(map, M_BUSDMA); - dmat->map_count--; - CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat); - return (0); -} - -/* - * Allocate a piece of memory that can be efficiently mapped into bus device - * space based on the constraints listed in the dma tag. Returns a pointer to - * the allocated memory, and a pointer to an associated bus_dmamap. - */ -int -bus_dmamem_alloc(bus_dma_tag_t dmat, void **vaddr, int flags, - bus_dmamap_t *mapp) -{ - busdma_bufalloc_t ba; - struct busdma_bufzone *bufzone; - bus_dmamap_t map; - vm_memattr_t memattr; - int mflags; - - if (flags & BUS_DMA_NOWAIT) - mflags = M_NOWAIT; - else - mflags = M_WAITOK; - if (flags & BUS_DMA_ZERO) - mflags |= M_ZERO; - - *mapp = map = allocate_map(dmat, mflags); - if (map == NULL) { - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, ENOMEM); - return (ENOMEM); - } - map->flags = DMAMAP_DMAMEM_ALLOC; - - /* Choose a busdma buffer allocator based on memory type flags. */ - if (flags & BUS_DMA_COHERENT) { - memattr = VM_MEMATTR_UNCACHEABLE; - ba = coherent_allocator; - map->flags |= DMAMAP_COHERENT; - } else { - memattr = VM_MEMATTR_DEFAULT; - ba = standard_allocator; - } - - /* - * Try to find a bufzone in the allocator that holds a cache of buffers - * of the right size for this request. If the buffer is too big to be - * held in the allocator cache, this returns NULL. - */ - bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); - - /* - * Allocate the buffer from the uma(9) allocator if... - * - It's small enough to be in the allocator (bufzone not NULL). 
- * - The alignment constraint isn't larger than the allocation size - * (the allocator aligns buffers to their size boundaries). - * - There's no need to handle lowaddr/highaddr exclusion zones. - * else allocate non-contiguous pages if... - * - The page count that could get allocated doesn't exceed nsegments. - * - The alignment constraint isn't larger than a page boundary. - * - There are no boundary-crossing constraints. - * else allocate a block of contiguous pages because one or more of the - * constraints is something that only the contig allocator can fulfill. - */ - if (bufzone != NULL && dmat->alignment <= bufzone->size && - !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) { - *vaddr = uma_zalloc(bufzone->umazone, mflags); - } else if (dmat->nsegments >= btoc(dmat->maxsize) && - dmat->alignment <= PAGE_SIZE && dmat->boundary == 0) { - *vaddr = (void *)kmem_alloc_attr(kernel_arena, dmat->maxsize, - mflags, 0, dmat->lowaddr, memattr); - } else { - *vaddr = (void *)kmem_alloc_contig(kernel_arena, dmat->maxsize, - mflags, 0, dmat->lowaddr, dmat->alignment, dmat->boundary, - memattr); - } - if (*vaddr == NULL) { - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, ENOMEM); - free(map, M_BUSDMA); - *mapp = NULL; - return (ENOMEM); - } - if (map->flags & DMAMAP_COHERENT) - atomic_add_32(&maps_coherent, 1); - atomic_add_32(&maps_dmamem, 1); - atomic_add_32(&maps_total, 1); - dmat->map_count++; - - CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d", - __func__, dmat, dmat->flags, 0); - return (0); -} - -/* - * Free a piece of memory that was allocated via bus_dmamem_alloc, along with - * its associated map. 
- */ -void -bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map) -{ - struct busdma_bufzone *bufzone; - busdma_bufalloc_t ba; - - if (map->flags & DMAMAP_COHERENT) - ba = coherent_allocator; - else - ba = standard_allocator; - - bufzone = busdma_bufalloc_findzone(ba, dmat->maxsize); - - if (bufzone != NULL && dmat->alignment <= bufzone->size && - !_bus_dma_can_bounce(dmat->lowaddr, dmat->highaddr)) - uma_zfree(bufzone->umazone, vaddr); - else - kmem_free(kernel_arena, (vm_offset_t)vaddr, dmat->maxsize); - - dmat->map_count--; - if (map->flags & DMAMAP_COHERENT) - atomic_subtract_32(&maps_coherent, 1); - atomic_subtract_32(&maps_total, 1); - atomic_subtract_32(&maps_dmamem, 1); - free(map, M_BUSDMA); - CTR3(KTR_BUSDMA, "%s: tag %p flags 0x%x", __func__, dmat, dmat->flags); -} - -static void -_bus_dmamap_count_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, - bus_size_t buflen, int flags) -{ - bus_addr_t curaddr; - bus_size_t sgsize; - - if (map->pagesneeded == 0) { - CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", - dmat->lowaddr, dmat->boundary, dmat->alignment); - CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", - map, map->pagesneeded); - /* - * Count the number of bounce pages - * needed in order to complete this transfer - */ - curaddr = buf; - while (buflen != 0) { - sgsize = MIN(buflen, dmat->maxsegsz); - if (run_filter(dmat, curaddr) != 0) { - sgsize = MIN(sgsize, - PAGE_SIZE - (curaddr & PAGE_MASK)); - map->pagesneeded++; - } - curaddr += sgsize; - buflen -= sgsize; - } - CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); - } -} - -static void -_bus_dmamap_count_pages(bus_dma_tag_t dmat, bus_dmamap_t map, pmap_t pmap, - void *buf, bus_size_t buflen, int flags) -{ - vm_offset_t vaddr; - vm_offset_t vendaddr; - bus_addr_t paddr; - - if (map->pagesneeded == 0) { - CTR3(KTR_BUSDMA, "lowaddr= %d, boundary= %d, alignment= %d", - dmat->lowaddr, dmat->boundary, dmat->alignment); - CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", 
- map, map->pagesneeded); - /* - * Count the number of bounce pages - * needed in order to complete this transfer - */ - vaddr = trunc_page((vm_offset_t)buf); - vendaddr = (vm_offset_t)buf + buflen; - - while (vaddr < vendaddr) { - if (__predict_true(pmap == kernel_pmap)) - paddr = pmap_kextract(vaddr); - else - paddr = pmap_extract(pmap, vaddr); - if (run_filter(dmat, paddr) != 0) - map->pagesneeded++; - vaddr += PAGE_SIZE; - } - CTR1(KTR_BUSDMA, "pagesneeded= %d\n", map->pagesneeded); - } -} - -static int -_bus_dmamap_reserve_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int flags) -{ - - /* Reserve Necessary Bounce Pages */ - mtx_lock(&bounce_lock); - if (flags & BUS_DMA_NOWAIT) { - if (reserve_bounce_pages(dmat, map, 0) != 0) { - mtx_unlock(&bounce_lock); - return (ENOMEM); - } - } else { - if (reserve_bounce_pages(dmat, map, 1) != 0) { - /* Queue us for resources */ - STAILQ_INSERT_TAIL(&bounce_map_waitinglist, map, links); - mtx_unlock(&bounce_lock); - return (EINPROGRESS); - } - } - mtx_unlock(&bounce_lock); - - return (0); -} - -/* - * Add a single contiguous physical range to the segment list. - */ -static int -_bus_dmamap_addseg(bus_dma_tag_t dmat, bus_dmamap_t map, bus_addr_t curaddr, - bus_size_t sgsize, bus_dma_segment_t *segs, int *segp) -{ - bus_addr_t baddr, bmask; - int seg; - - /* - * Make sure we don't cross any boundaries. - */ - bmask = ~(dmat->boundary - 1); - if (dmat->boundary > 0) { - baddr = (curaddr + dmat->boundary) & bmask; - if (sgsize > (baddr - curaddr)) - sgsize = (baddr - curaddr); - } - if (dmat->ranges) { - struct arm32_dma_range *dr; - - dr = _bus_dma_inrange(dmat->ranges, dmat->_nranges, - curaddr); - if (dr == NULL) - return (0); - /* - * In a valid DMA range. Translate the physical - * memory address to an address in the DMA window. - */ - curaddr = (curaddr - dr->dr_sysbase) + dr->dr_busbase; - - } - - seg = *segp; - /* - * Insert chunk into a segment, coalescing with - * the previous segment if possible. 
- */ - if (seg >= 0 && - curaddr == segs[seg].ds_addr + segs[seg].ds_len && - (segs[seg].ds_len + sgsize) <= dmat->maxsegsz && - (dmat->boundary == 0 || - (segs[seg].ds_addr & bmask) == (curaddr & bmask))) { - segs[seg].ds_len += sgsize; - } else { - if (++seg >= dmat->nsegments) - return (0); - segs[seg].ds_addr = curaddr; - segs[seg].ds_len = sgsize; - } - *segp = seg; - return (sgsize); -} - -/* - * Utility function to load a physical buffer. segp contains - * the starting segment on entrace, and the ending segment on exit. - */ -int -_bus_dmamap_load_phys(bus_dma_tag_t dmat, bus_dmamap_t map, vm_paddr_t buf, - bus_size_t buflen, int flags, bus_dma_segment_t *segs, int *segp) -{ - bus_addr_t curaddr; - bus_addr_t sl_end = 0; - bus_size_t sgsize; - struct sync_list *sl; - int error; - - if (segs == NULL) - segs = map->segments; - - counter_u64_add(maploads_total, 1); - counter_u64_add(maploads_physmem, 1); - - if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - _bus_dmamap_count_phys(dmat, map, buf, buflen, flags); - if (map->pagesneeded != 0) { - counter_u64_add(maploads_bounced, 1); - error = _bus_dmamap_reserve_pages(dmat, map, flags); - if (error) - return (error); - } - } - - sl = map->slist + map->sync_count - 1; - - while (buflen > 0) { - curaddr = buf; - sgsize = MIN(buflen, dmat->maxsegsz); - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - sgsize = MIN(sgsize, PAGE_SIZE - (curaddr & PAGE_MASK)); - curaddr = add_bounce_page(dmat, map, 0, curaddr, - sgsize); - } else { - if (map->sync_count > 0) - sl_end = VM_PAGE_TO_PHYS(sl->pages) + - sl->dataoffs + sl->datacount; - - if (map->sync_count == 0 || curaddr != sl_end) { - if (++map->sync_count > dmat->nsegments) - break; - sl++; - sl->vaddr = 0; - sl->datacount = sgsize; - sl->pages = PHYS_TO_VM_PAGE(curaddr); - sl->dataoffs = curaddr & PAGE_MASK; - } else - sl->datacount += sgsize; - } - sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, 
- segp); - if (sgsize == 0) - break; - buf += sgsize; - buflen -= sgsize; - } - - /* - * Did we fit? - */ - if (buflen != 0) { - _bus_dmamap_unload(dmat, map); - return (EFBIG); /* XXX better return value here? */ - } - return (0); -} - -int -_bus_dmamap_load_ma(bus_dma_tag_t dmat, bus_dmamap_t map, - struct vm_page **ma, bus_size_t tlen, int ma_offs, int flags, - bus_dma_segment_t *segs, int *segp) -{ - - return (bus_dmamap_load_ma_triv(dmat, map, ma, tlen, ma_offs, flags, - segs, segp)); -} - -/* - * Utility function to load a linear buffer. segp contains - * the starting segment on entrance, and the ending segment on exit. - */ -int -_bus_dmamap_load_buffer(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf, - bus_size_t buflen, struct pmap *pmap, int flags, bus_dma_segment_t *segs, - int *segp) -{ - bus_size_t sgsize; - bus_addr_t curaddr; - bus_addr_t sl_pend = 0; - struct sync_list *sl; - vm_offset_t kvaddr; - vm_offset_t vaddr = (vm_offset_t)buf; - vm_offset_t sl_vend = 0; - int error = 0; - - counter_u64_add(maploads_total, 1); - if (map->flags & DMAMAP_COHERENT) - counter_u64_add(maploads_coherent, 1); - if (map->flags & DMAMAP_DMAMEM_ALLOC) - counter_u64_add(maploads_dmamem, 1); - - if (segs == NULL) - segs = map->segments; - if (flags & BUS_DMA_LOAD_MBUF) { - counter_u64_add(maploads_mbuf, 1); - map->flags |= DMAMAP_CACHE_ALIGNED; - } - - if ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) { - _bus_dmamap_count_pages(dmat, map, pmap, buf, buflen, flags); - if (map->pagesneeded != 0) { - counter_u64_add(maploads_bounced, 1); - error = _bus_dmamap_reserve_pages(dmat, map, flags); - if (error) - return (error); - } - } - CTR3(KTR_BUSDMA, "lowaddr= %d boundary= %d, " - "alignment= %d", dmat->lowaddr, dmat->boundary, dmat->alignment); - - sl = map->slist + map->sync_count - 1; - - while (buflen > 0) { - /* - * Get the physical address for this segment. 
- */ - if (__predict_true(pmap == kernel_pmap)) { - curaddr = pmap_kextract(vaddr); - kvaddr = vaddr; - } else { - curaddr = pmap_extract(pmap, vaddr); - map->flags &= ~DMAMAP_COHERENT; - kvaddr = 0; - } - - /* - * Compute the segment size, and adjust counts. - */ - sgsize = PAGE_SIZE - (curaddr & PAGE_MASK); - if (sgsize > dmat->maxsegsz) - sgsize = dmat->maxsegsz; - if (buflen < sgsize) - sgsize = buflen; - - if (((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0) && - map->pagesneeded != 0 && run_filter(dmat, curaddr)) { - curaddr = add_bounce_page(dmat, map, kvaddr, curaddr, - sgsize); - } else { - if (map->sync_count > 0) { - sl_pend = VM_PAGE_TO_PHYS(sl->pages) + - sl->dataoffs + sl->datacount; - sl_vend = sl->vaddr + sl->datacount; - } - - if (map->sync_count == 0 || - (kvaddr != 0 && kvaddr != sl_vend) || - (kvaddr == 0 && curaddr != sl_pend)) { - - if (++map->sync_count > dmat->nsegments) - goto cleanup; - sl++; - sl->vaddr = kvaddr; - sl->datacount = sgsize; - sl->pages = PHYS_TO_VM_PAGE(curaddr); - sl->dataoffs = curaddr & PAGE_MASK; - } else - sl->datacount += sgsize; - } - sgsize = _bus_dmamap_addseg(dmat, map, curaddr, sgsize, segs, - segp); - if (sgsize == 0) - break; - vaddr += sgsize; - buflen -= sgsize; - } - -cleanup: - /* - * Did we fit? - */ - if (buflen != 0) { - _bus_dmamap_unload(dmat, map); - return (EFBIG); /* XXX better return value here? */ - } - return (0); -} - -void -__bus_dmamap_waitok(bus_dma_tag_t dmat, bus_dmamap_t map, struct memdesc *mem, - bus_dmamap_callback_t *callback, void *callback_arg) -{ - - KASSERT(dmat != NULL, ("dmatag is NULL")); - KASSERT(map != NULL, ("dmamap is NULL")); - map->mem = *mem; - map->callback = callback; - map->callback_arg = callback_arg; -} - -bus_dma_segment_t * -_bus_dmamap_complete(bus_dma_tag_t dmat, bus_dmamap_t map, - bus_dma_segment_t *segs, int nsegs, int error) -{ - - if (segs == NULL) - segs = map->segments; - return (segs); -} - -/* - * Release the mapping held by map. 
- */ -void -_bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map) -{ - struct bounce_page *bpage; - struct bounce_zone *bz; - - if ((bz = dmat->bounce_zone) != NULL) { - while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) { - STAILQ_REMOVE_HEAD(&map->bpages, links); - free_bounce_page(dmat, bpage); - } - - bz = dmat->bounce_zone; - bz->free_bpages += map->pagesreserved; - bz->reserved_bpages -= map->pagesreserved; - map->pagesreserved = 0; - map->pagesneeded = 0; - } - map->sync_count = 0; - map->flags &= ~DMAMAP_MBUF; -} - -static void -bus_dmamap_sync_buf(vm_offset_t buf, int len, bus_dmasync_op_t op, - int bufaligned) -{ - char _tmp_cl[arm_dcache_align], _tmp_clend[arm_dcache_align]; - register_t s; - int partial; - - if ((op & BUS_DMASYNC_PREWRITE) && !(op & BUS_DMASYNC_PREREAD)) { - cpu_dcache_wb_range(buf, len); - cpu_l2cache_wb_range(buf, len); - } - - /* - * If the caller promises the buffer is properly aligned to a cache line - * (even if the call parms make it look like it isn't) we can avoid - * attempting to preserve the non-DMA part of the cache line in the - * POSTREAD case, but we MUST still do a writeback in the PREREAD case. - * - * This covers the case of mbufs, where we know how they're aligned and - * know the CPU doesn't touch the header in front of the DMA data area - * during the IO, but it may have touched it right before invoking the - * sync, so a PREREAD writeback is required. - * - * It also handles buffers we created in bus_dmamem_alloc(), which are - * always aligned and padded to cache line size even if the IO length - * isn't a multiple of cache line size. In this case the PREREAD - * writeback probably isn't required, but it's harmless. 
- */ - partial = (((vm_offset_t)buf) | len) & arm_dcache_align_mask; - - if (op & BUS_DMASYNC_PREREAD) { - if (!(op & BUS_DMASYNC_PREWRITE) && !partial) { - cpu_dcache_inv_range(buf, len); - cpu_l2cache_inv_range(buf, len); - } else { - cpu_dcache_wbinv_range(buf, len); - cpu_l2cache_wbinv_range(buf, len); - } - } - if (op & BUS_DMASYNC_POSTREAD) { - if (partial && !bufaligned) { - s = intr_disable(); - if (buf & arm_dcache_align_mask) - memcpy(_tmp_cl, (void *)(buf & - ~arm_dcache_align_mask), - buf & arm_dcache_align_mask); - if ((buf + len) & arm_dcache_align_mask) - memcpy(_tmp_clend, - (void *)(buf + len), - arm_dcache_align - - ((buf + len) & arm_dcache_align_mask)); - } - cpu_dcache_inv_range(buf, len); - cpu_l2cache_inv_range(buf, len); - if (partial && !bufaligned) { - if (buf & arm_dcache_align_mask) - memcpy((void *)(buf & - ~arm_dcache_align_mask), _tmp_cl, - buf & arm_dcache_align_mask); - if ((buf + len) & arm_dcache_align_mask) - memcpy((void *)(buf + len), - _tmp_clend, arm_dcache_align - - ((buf + len) & arm_dcache_align_mask)); - intr_restore(s); - } - } -} - -static void -bus_dmamap_sync_sl(struct sync_list *sl, bus_dmasync_op_t op, - int bufaligned) -{ - vm_offset_t tempvaddr; - vm_page_t curpage; - size_t npages; - - if (sl->vaddr != 0) { - bus_dmamap_sync_buf(sl->vaddr, sl->datacount, op, bufaligned); - return; - } - - tempvaddr = 0; - npages = atop(round_page(sl->dataoffs + sl->datacount)); - - for (curpage = sl->pages; curpage != sl->pages + npages; ++curpage) { - /* - * If the page is mapped to some other VA that hasn't - * been supplied to busdma, then pmap_quick_enter_page() - * will find all duplicate mappings and mark them - * uncacheable. - * That will also do any necessary wb/inv. Otherwise, - * if the page is truly unmapped, then we don't actually - * need to do cache maintenance. - * XXX: May overwrite DMA'ed data in the POSTREAD - * case where the CPU has written to a cacheline not - * completely covered by the DMA region. 
- */ - KASSERT(VM_PAGE_TO_PHYS(curpage) == VM_PAGE_TO_PHYS(sl->pages) + - ptoa(curpage - sl->pages), - ("unexpected vm_page_t phys: 0x%08x != 0x%08x", - VM_PAGE_TO_PHYS(curpage), VM_PAGE_TO_PHYS(sl->pages) + - ptoa(curpage - sl->pages))); - tempvaddr = pmap_quick_enter_page(curpage); - pmap_quick_remove_page(tempvaddr); - } -} - -static void -_bus_dmamap_sync_bp(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) -{ - struct bounce_page *bpage; - vm_offset_t datavaddr, tempvaddr; - - if ((op & (BUS_DMASYNC_PREWRITE | BUS_DMASYNC_POSTREAD)) == 0) - return; - - STAILQ_FOREACH(bpage, &map->bpages, links) { - tempvaddr = 0; - datavaddr = bpage->datavaddr; - if (op & BUS_DMASYNC_PREWRITE) { - if (datavaddr == 0) { - tempvaddr = - pmap_quick_enter_page(bpage->datapage); - datavaddr = tempvaddr | bpage->dataoffs; - } - bcopy((void *)datavaddr, - (void *)bpage->vaddr, bpage->datacount); - if (tempvaddr != 0) - pmap_quick_remove_page(tempvaddr); - cpu_dcache_wb_range(bpage->vaddr, bpage->datacount); - cpu_l2cache_wb_range(bpage->vaddr, bpage->datacount); - dmat->bounce_zone->total_bounced++; - } - if (op & BUS_DMASYNC_POSTREAD) { - cpu_dcache_inv_range(bpage->vaddr, bpage->datacount); - cpu_l2cache_inv_range(bpage->vaddr, bpage->datacount); - if (datavaddr == 0) { - tempvaddr = - pmap_quick_enter_page(bpage->datapage); - datavaddr = tempvaddr | bpage->dataoffs; - } - bcopy((void *)bpage->vaddr, - (void *)datavaddr, bpage->datacount); - if (tempvaddr != 0) - pmap_quick_remove_page(tempvaddr); - dmat->bounce_zone->total_bounced++; - } - } -} - -void -_bus_dmamap_sync(bus_dma_tag_t dmat, bus_dmamap_t map, bus_dmasync_op_t op) -{ - struct sync_list *sl, *end; - int bufaligned; - - if (op == BUS_DMASYNC_POSTWRITE) - return; - if (map->flags & DMAMAP_COHERENT) - goto drain; - if (STAILQ_FIRST(&map->bpages)) - _bus_dmamap_sync_bp(dmat, map, op); - CTR3(KTR_BUSDMA, "%s: op %x flags %x", __func__, op, map->flags); - bufaligned = (map->flags & DMAMAP_CACHE_ALIGNED); - if 
(map->sync_count) { - end = &map->slist[map->sync_count]; - for (sl = &map->slist[0]; sl != end; sl++) - bus_dmamap_sync_sl(sl, op, bufaligned); - } - -drain: - - cpu_drain_writebuf(); -} - -static void -init_bounce_pages(void *dummy __unused) -{ - - total_bpages = 0; - STAILQ_INIT(&bounce_zone_list); - STAILQ_INIT(&bounce_map_waitinglist); - STAILQ_INIT(&bounce_map_callbacklist); - mtx_init(&bounce_lock, "bounce pages lock", NULL, MTX_DEF); -} -SYSINIT(bpages, SI_SUB_LOCK, SI_ORDER_ANY, init_bounce_pages, NULL); - -static struct sysctl_ctx_list * -busdma_sysctl_tree(struct bounce_zone *bz) -{ - - return (&bz->sysctl_tree); -} - -static struct sysctl_oid * -busdma_sysctl_tree_top(struct bounce_zone *bz) -{ - - return (bz->sysctl_tree_top); -} - -static int -alloc_bounce_zone(bus_dma_tag_t dmat) -{ - struct bounce_zone *bz; - - /* Check to see if we already have a suitable zone */ - STAILQ_FOREACH(bz, &bounce_zone_list, links) { - if ((dmat->alignment <= bz->alignment) && - (dmat->lowaddr >= bz->lowaddr)) { - dmat->bounce_zone = bz; - return (0); - } - } - - if ((bz = (struct bounce_zone *)malloc(sizeof(*bz), M_BUSDMA, - M_NOWAIT | M_ZERO)) == NULL) - return (ENOMEM); - - STAILQ_INIT(&bz->bounce_page_list); - bz->free_bpages = 0; - bz->reserved_bpages = 0; - bz->active_bpages = 0; - bz->lowaddr = dmat->lowaddr; - bz->alignment = MAX(dmat->alignment, PAGE_SIZE); - bz->map_count = 0; - snprintf(bz->zoneid, 8, "zone%d", busdma_zonecount); - busdma_zonecount++; - snprintf(bz->lowaddrid, 18, "%#jx", (uintmax_t)bz->lowaddr); - STAILQ_INSERT_TAIL(&bounce_zone_list, bz, links); - dmat->bounce_zone = bz; - - sysctl_ctx_init(&bz->sysctl_tree); - bz->sysctl_tree_top = SYSCTL_ADD_NODE(&bz->sysctl_tree, - SYSCTL_STATIC_CHILDREN(_hw_busdma), OID_AUTO, bz->zoneid, - CTLFLAG_RD, 0, ""); - if (bz->sysctl_tree_top == NULL) { - sysctl_ctx_free(&bz->sysctl_tree); - return (0); /* XXX error code? 
*/ - } - - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bpages", CTLFLAG_RD, &bz->total_bpages, 0, - "Total bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "free_bpages", CTLFLAG_RD, &bz->free_bpages, 0, - "Free bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "reserved_bpages", CTLFLAG_RD, &bz->reserved_bpages, 0, - "Reserved bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "active_bpages", CTLFLAG_RD, &bz->active_bpages, 0, - "Active bounce pages"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_bounced", CTLFLAG_RD, &bz->total_bounced, 0, - "Total bounce requests (pages bounced)"); - SYSCTL_ADD_INT(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "total_deferred", CTLFLAG_RD, &bz->total_deferred, 0, - "Total bounce requests that were deferred"); - SYSCTL_ADD_STRING(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "lowaddr", CTLFLAG_RD, bz->lowaddrid, 0, ""); - SYSCTL_ADD_ULONG(busdma_sysctl_tree(bz), - SYSCTL_CHILDREN(busdma_sysctl_tree_top(bz)), OID_AUTO, - "alignment", CTLFLAG_RD, &bz->alignment, ""); - - return (0); -} - -static int -alloc_bounce_pages(bus_dma_tag_t dmat, u_int numpages) -{ - struct bounce_zone *bz; - int count; - - bz = dmat->bounce_zone; - count = 0; - while (numpages > 0) { - struct bounce_page *bpage; - - bpage = (struct bounce_page *)malloc(sizeof(*bpage), M_BUSDMA, - M_NOWAIT | M_ZERO); - - if (bpage == NULL) - break; - bpage->vaddr = (vm_offset_t)contigmalloc(PAGE_SIZE, M_BOUNCE, - M_NOWAIT, 0ul, bz->lowaddr, PAGE_SIZE, 0); - if (bpage->vaddr == 0) { - free(bpage, M_BUSDMA); - break; - } - bpage->busaddr = pmap_kextract(bpage->vaddr); - 
mtx_lock(&bounce_lock); - STAILQ_INSERT_TAIL(&bz->bounce_page_list, bpage, links); - total_bpages++; - bz->total_bpages++; - bz->free_bpages++; - mtx_unlock(&bounce_lock); - count++; - numpages--; - } - return (count); -} - -static int -reserve_bounce_pages(bus_dma_tag_t dmat, bus_dmamap_t map, int commit) -{ - struct bounce_zone *bz; - int pages; - - mtx_assert(&bounce_lock, MA_OWNED); - bz = dmat->bounce_zone; - pages = MIN(bz->free_bpages, map->pagesneeded - map->pagesreserved); - if (commit == 0 && map->pagesneeded > (map->pagesreserved + pages)) - return (map->pagesneeded - (map->pagesreserved + pages)); - bz->free_bpages -= pages; - bz->reserved_bpages += pages; - map->pagesreserved += pages; - pages = map->pagesneeded - map->pagesreserved; - - return (pages); -} - -static bus_addr_t -add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr, - bus_addr_t addr, bus_size_t size) -{ - struct bounce_zone *bz; - struct bounce_page *bpage; - - KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag")); - KASSERT(map != NULL, ("add_bounce_page: bad map %p", map)); - - bz = dmat->bounce_zone; - if (map->pagesneeded == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesneeded--; - - if (map->pagesreserved == 0) - panic("add_bounce_page: map doesn't need any pages"); - map->pagesreserved--; - - mtx_lock(&bounce_lock); - bpage = STAILQ_FIRST(&bz->bounce_page_list); - if (bpage == NULL) - panic("add_bounce_page: free page list is empty"); - - STAILQ_REMOVE_HEAD(&bz->bounce_page_list, links); - bz->reserved_bpages--; - bz->active_bpages++; - mtx_unlock(&bounce_lock); - - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* Page offset needs to be preserved. 
*/ - bpage->vaddr |= addr & PAGE_MASK; - bpage->busaddr |= addr & PAGE_MASK; - } - bpage->datavaddr = vaddr; - bpage->datapage = PHYS_TO_VM_PAGE(addr); - bpage->dataoffs = addr & PAGE_MASK; - bpage->datacount = size; - STAILQ_INSERT_TAIL(&(map->bpages), bpage, links); - return (bpage->busaddr); -} - -static void -free_bounce_page(bus_dma_tag_t dmat, struct bounce_page *bpage) -{ - struct bus_dmamap *map; - struct bounce_zone *bz; - - bz = dmat->bounce_zone; - bpage->datavaddr = 0; - bpage->datacount = 0; - if (dmat->flags & BUS_DMA_KEEP_PG_OFFSET) { - /* - * Reset the bounce page to start at offset 0. Other uses - * of this bounce page may need to store a full page of - * data and/or assume it starts on a page boundary. - */ - bpage->vaddr &= ~PAGE_MASK; - bpage->busaddr &= ~PAGE_MASK; - } - - mtx_lock(&bounce_lock); - STAILQ_INSERT_HEAD(&bz->bounce_page_list, bpage, links); - bz->free_bpages++; - bz->active_bpages--; - if ((map = STAILQ_FIRST(&bounce_map_waitinglist)) != NULL) { - if (reserve_bounce_pages(map->dmat, map, 1) == 0) { - STAILQ_REMOVE_HEAD(&bounce_map_waitinglist, links); - STAILQ_INSERT_TAIL(&bounce_map_callbacklist, - map, links); - busdma_swi_pending = 1; - bz->total_deferred++; - swi_sched(vm_ih, 0); - } - } - mtx_unlock(&bounce_lock); -} - -void -busdma_swi(void) -{ - bus_dma_tag_t dmat; - struct bus_dmamap *map; - - mtx_lock(&bounce_lock); - while ((map = STAILQ_FIRST(&bounce_map_callbacklist)) != NULL) { - STAILQ_REMOVE_HEAD(&bounce_map_callbacklist, links); - mtx_unlock(&bounce_lock); - dmat = map->dmat; - dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_LOCK); - bus_dmamap_load_mem(map->dmat, map, &map->mem, map->callback, - map->callback_arg, BUS_DMA_WAITOK); - dmat->lockfunc(dmat->lockfuncarg, BUS_DMA_UNLOCK); - mtx_lock(&bounce_lock); - } - mtx_unlock(&bounce_lock); -} diff --git a/sys/arm/arm/pmap-v4.c b/sys/arm/arm/pmap-v4.c new file mode 100644 index 000000000000..529e9f1d4518 --- /dev/null +++ b/sys/arm/arm/pmap-v4.c @@ -0,0 +1,4874 @@ +/* 
From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */ +/*- + * Copyright 2004 Olivier Houchard. + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 2002-2003 Wasabi Systems, Inc. 
+ * Copyright (c) 2001 Richard Earnshaw + * Copyright (c) 2001-2002 Christopher Gilbert + * All rights reserved. + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. + */ +/*- + * Copyright (c) 1999 The NetBSD Foundation, Inc. + * All rights reserved. + * + * This code is derived from software contributed to The NetBSD Foundation + * by Charles M. Hannum. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS + * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ + +/*- + * Copyright (c) 1994-1998 Mark Brinicombe. + * Copyright (c) 1994 Brini. + * All rights reserved. + * + * This code is derived from software written for Brini by Mark Brinicombe + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Mark Brinicombe. + * 4. 
The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * + * RiscBSD kernel project + * + * pmap.c + * + * Machine dependant vm stuff + * + * Created : 20/09/94 + */ + +/* + * Special compilation symbols + * PMAP_DEBUG - Build in pmap_debug_level code + * + * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c + */ +/* Include header files */ + +#include "opt_vm.h" + +#include +__FBSDID("$FreeBSD$"); +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include + +#ifdef PMAP_DEBUG +#define PDEBUG(_lev_,_stat_) \ + if (pmap_debug_level >= (_lev_)) \ + ((_stat_)) +#define dprintf printf + +int pmap_debug_level = 0; +#define PMAP_INLINE +#else /* PMAP_DEBUG */ +#define PDEBUG(_lev_,_stat_) /* Nothing */ +#define dprintf(x, arg...) 
+#define PMAP_INLINE __inline +#endif /* PMAP_DEBUG */ + +extern struct pv_addr systempage; + +extern int last_fault_code; + +/* + * Internal function prototypes + */ +static void pmap_free_pv_entry (pv_entry_t); +static pv_entry_t pmap_get_pv_entry(void); + +static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, + vm_prot_t, u_int); +static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va); +static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t); +static void pmap_alloc_l1(pmap_t); +static void pmap_free_l1(pmap_t); + +static int pmap_clearbit(struct vm_page *, u_int); + +static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); +static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); +static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); +static vm_offset_t kernel_pt_lookup(vm_paddr_t); + +static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); + +vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ +vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ +vm_offset_t pmap_curmaxkvaddr; +vm_paddr_t kernel_l1pa; + +vm_offset_t kernel_vm_end = 0; + +vm_offset_t vm_max_kernel_address; + +struct pmap kernel_pmap_store; + +static pt_entry_t *csrc_pte, *cdst_pte; +static vm_offset_t csrcp, cdstp, qmap_addr; +static struct mtx cmtx, qmap_mtx; + +static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); +/* + * These routines are called when the CPU type is identified to set up + * the PTE prototypes, cache modes, etc. + * + * The variables are always here, just in case LKMs need to reference + * them (though, they shouldn't). 
+ */ + +pt_entry_t pte_l1_s_cache_mode; +pt_entry_t pte_l1_s_cache_mode_pt; +pt_entry_t pte_l1_s_cache_mask; + +pt_entry_t pte_l2_l_cache_mode; +pt_entry_t pte_l2_l_cache_mode_pt; +pt_entry_t pte_l2_l_cache_mask; + +pt_entry_t pte_l2_s_cache_mode; +pt_entry_t pte_l2_s_cache_mode_pt; +pt_entry_t pte_l2_s_cache_mask; + +pt_entry_t pte_l2_s_prot_u; +pt_entry_t pte_l2_s_prot_w; +pt_entry_t pte_l2_s_prot_mask; + +pt_entry_t pte_l1_s_proto; +pt_entry_t pte_l1_c_proto; +pt_entry_t pte_l2_s_proto; + +void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); +void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, + vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, + int cnt); +void (*pmap_zero_page_func)(vm_paddr_t, int, int); + +struct msgbuf *msgbufp = 0; + +/* + * Crashdump maps. + */ +static caddr_t crashdumpmap; + +extern void bcopy_page(vm_offset_t, vm_offset_t); +extern void bzero_page(vm_offset_t); + +extern vm_offset_t alloc_firstaddr; + +char *_tmppt; + +/* + * Metadata for L1 translation tables. + */ +struct l1_ttable { + /* Entry on the L1 Table list */ + SLIST_ENTRY(l1_ttable) l1_link; + + /* Entry on the L1 Least Recently Used list */ + TAILQ_ENTRY(l1_ttable) l1_lru; + + /* Track how many domains are allocated from this L1 */ + volatile u_int l1_domain_use_count; + + /* + * A free-list of domain numbers for this L1. + * We avoid using ffs() and a bitmap to track domains since ffs() + * is slow on ARM. + */ + u_int8_t l1_domain_first; + u_int8_t l1_domain_free[PMAP_DOMAINS]; + + /* Physical address of this L1 page table */ + vm_paddr_t l1_physaddr; + + /* KVA of this L1 page table */ + pd_entry_t *l1_kva; +}; + +/* + * Convert a virtual address into its L1 table index. That is, the + * index used to locate the L2 descriptor table pointer in an L1 table. + * This is basically used to index l1->l1_kva[]. + * + * Each L2 descriptor table represents 1MB of VA space. 
+ */ +#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) + +/* + * L1 Page Tables are tracked using a Least Recently Used list. + * - New L1s are allocated from the HEAD. + * - Freed L1s are added to the TAIl. + * - Recently accessed L1s (where an 'access' is some change to one of + * the userland pmaps which owns this L1) are moved to the TAIL. + */ +static TAILQ_HEAD(, l1_ttable) l1_lru_list; +/* + * A list of all L1 tables + */ +static SLIST_HEAD(, l1_ttable) l1_list; +static struct mtx l1_lru_lock; + +/* + * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. + * + * This is normally 16MB worth L2 page descriptors for any given pmap. + * Reference counts are maintained for L2 descriptors so they can be + * freed when empty. + */ +struct l2_dtable { + /* The number of L2 page descriptors allocated to this l2_dtable */ + u_int l2_occupancy; + + /* List of L2 page descriptors */ + struct l2_bucket { + pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ + vm_paddr_t l2b_phys; /* Physical address of same */ + u_short l2b_l1idx; /* This L2 table's L1 index */ + u_short l2b_occupancy; /* How many active descriptors */ + } l2_bucket[L2_BUCKET_SIZE]; +}; + +/* pmap_kenter_internal flags */ +#define KENTER_CACHE 0x1 +#define KENTER_USER 0x2 + +/* + * Given an L1 table index, calculate the corresponding l2_dtable index + * and bucket index within the l2_dtable. + */ +#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ + (L2_SIZE - 1)) +#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) + +/* + * Given a virtual address, this macro returns the + * virtual address required to drop into the next L2 bucket. + */ +#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) + +/* + * We try to map the page tables write-through, if possible. However, not + * all CPUs have a write-through cache mode, so on those we have to sync + * the cache when we frob page tables. + * + * We try to evaluate this at compile time, if possible. 
However, it's + * not always possible to do that, hence this run-time var. + */ +int pmap_needs_pte_sync; + +/* + * Macro to determine if a mapping might be resident in the + * instruction cache and/or TLB + */ +#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) + +/* + * Macro to determine if a mapping might be resident in the + * data cache and/or TLB + */ +#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) + +#ifndef PMAP_SHPGPERPROC +#define PMAP_SHPGPERPROC 200 +#endif + +#define pmap_is_current(pm) ((pm) == kernel_pmap || \ + curproc->p_vmspace->vm_map.pmap == (pm)) +static uma_zone_t pvzone = NULL; +uma_zone_t l2zone; +static uma_zone_t l2table_zone; +static vm_offset_t pmap_kernel_l2dtable_kva; +static vm_offset_t pmap_kernel_l2ptp_kva; +static vm_paddr_t pmap_kernel_l2ptp_phys; +static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0; +static struct rwlock pvh_global_lock; + +void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, + vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); +#if ARM_MMU_XSCALE == 1 +void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, + vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); +#endif + +/* + * This list exists for the benefit of pmap_map_chunk(). It keeps track + * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can + * find them as necessary. + * + * Note that the data on this list MUST remain valid after initarm() returns, + * as pmap_bootstrap() uses it to contruct L2 table metadata. + */ +SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); + +static void +pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) +{ + int i; + + l1->l1_kva = l1pt; + l1->l1_domain_use_count = 0; + l1->l1_domain_first = 0; + + for (i = 0; i < PMAP_DOMAINS; i++) + l1->l1_domain_free[i] = i + 1; + + /* + * Copy the kernel's L1 entries to each new L1. 
+ */ + if (l1pt != kernel_pmap->pm_l1->l1_kva) + memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE); + + if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0) + panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); + SLIST_INSERT_HEAD(&l1_list, l1, l1_link); + TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); +} + +static vm_offset_t +kernel_pt_lookup(vm_paddr_t pa) +{ + struct pv_addr *pv; + + SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { + if (pv->pv_pa == pa) + return (pv->pv_va); + } + return (0); +} + +#if ARM_MMU_GENERIC != 0 +void +pmap_pte_init_generic(void) +{ + + pte_l1_s_cache_mode = L1_S_B|L1_S_C; + pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; + + pte_l2_l_cache_mode = L2_B|L2_C; + pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; + + pte_l2_s_cache_mode = L2_B|L2_C; + pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; + + /* + * If we have a write-through cache, set B and C. If + * we have a write-back cache, then we assume setting + * only C will make those pages write-through. 
+ */ + if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { + pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; + pte_l2_l_cache_mode_pt = L2_B|L2_C; + pte_l2_s_cache_mode_pt = L2_B|L2_C; + } else { + pte_l1_s_cache_mode_pt = L1_S_C; + pte_l2_l_cache_mode_pt = L2_C; + pte_l2_s_cache_mode_pt = L2_C; + } + + pte_l2_s_prot_u = L2_S_PROT_U_generic; + pte_l2_s_prot_w = L2_S_PROT_W_generic; + pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; + + pte_l1_s_proto = L1_S_PROTO_generic; + pte_l1_c_proto = L1_C_PROTO_generic; + pte_l2_s_proto = L2_S_PROTO_generic; + + pmap_copy_page_func = pmap_copy_page_generic; + pmap_copy_page_offs_func = pmap_copy_page_offs_generic; + pmap_zero_page_func = pmap_zero_page_generic; +} + +#endif /* ARM_MMU_GENERIC != 0 */ + +#if ARM_MMU_XSCALE == 1 +#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) +static u_int xscale_use_minidata; +#endif + +void +pmap_pte_init_xscale(void) +{ + uint32_t auxctl; + int write_through = 0; + + pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; + pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; + + pte_l2_l_cache_mode = L2_B|L2_C; + pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; + + pte_l2_s_cache_mode = L2_B|L2_C; + pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; + + pte_l1_s_cache_mode_pt = L1_S_C; + pte_l2_l_cache_mode_pt = L2_C; + pte_l2_s_cache_mode_pt = L2_C; +#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE + /* + * The XScale core has an enhanced mode where writes that + * miss the cache cause a cache line to be allocated. This + * is significantly faster than the traditional, write-through + * behavior of this case. + */ + pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); + pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); + pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); +#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ +#ifdef XSCALE_CACHE_WRITE_THROUGH + /* + * Some versions of the XScale core have various bugs in + * their cache units, the work-around for which is to run + * the cache in write-through mode. 
Unfortunately, this + * has a major (negative) impact on performance. So, we + * go ahead and run fast-and-loose, in the hopes that we + * don't line up the planets in a way that will trip the + * bugs. + * + * However, we give you the option to be slow-but-correct. + */ + write_through = 1; +#elif defined(XSCALE_CACHE_WRITE_BACK) + /* force write back cache mode */ + write_through = 0; +#elif defined(CPU_XSCALE_PXA2X0) + /* + * Intel PXA2[15]0 processors are known to have a bug in + * write-back cache on revision 4 and earlier (stepping + * A[01] and B[012]). Fixed for C0 and later. + */ + { + uint32_t id, type; + + id = cpu_ident(); + type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); + + if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { + if ((id & CPU_ID_REVISION_MASK) < 5) { + /* write through for stepping A0-1 and B0-2 */ + write_through = 1; + } + } + } +#endif /* XSCALE_CACHE_WRITE_THROUGH */ + + if (write_through) { + pte_l1_s_cache_mode = L1_S_C; + pte_l2_l_cache_mode = L2_C; + pte_l2_s_cache_mode = L2_C; + } + +#if (ARM_NMMUS > 1) + xscale_use_minidata = 1; +#endif + + pte_l2_s_prot_u = L2_S_PROT_U_xscale; + pte_l2_s_prot_w = L2_S_PROT_W_xscale; + pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; + + pte_l1_s_proto = L1_S_PROTO_xscale; + pte_l1_c_proto = L1_C_PROTO_xscale; + pte_l2_s_proto = L2_S_PROTO_xscale; + +#ifdef CPU_XSCALE_CORE3 + pmap_copy_page_func = pmap_copy_page_generic; + pmap_copy_page_offs_func = pmap_copy_page_offs_generic; + pmap_zero_page_func = pmap_zero_page_generic; + xscale_use_minidata = 0; + /* Make sure it is L2-cachable */ + pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T); + pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P; + pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ; + pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode; + pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T); + pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; + +#else + pmap_copy_page_func = pmap_copy_page_xscale; + 
pmap_copy_page_offs_func = pmap_copy_page_offs_xscale; + pmap_zero_page_func = pmap_zero_page_xscale; +#endif + + /* + * Disable ECC protection of page table access, for now. + */ + __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); + auxctl &= ~XSCALE_AUXCTL_P; + __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); +} + +/* + * xscale_setup_minidata: + * + * Set up the mini-data cache clean area. We require the + * caller to allocate the right amount of physically and + * virtually contiguous space. + */ +extern vm_offset_t xscale_minidata_clean_addr; +extern vm_size_t xscale_minidata_clean_size; /* already initialized */ +void +xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) +{ + pd_entry_t *pde = (pd_entry_t *) l1pt; + pt_entry_t *pte; + vm_size_t size; + uint32_t auxctl; + + xscale_minidata_clean_addr = va; + + /* Round it to page size. */ + size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; + + for (; size != 0; + va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { + pte = (pt_entry_t *) kernel_pt_lookup( + pde[L1_IDX(va)] & L1_C_ADDR_MASK); + if (pte == NULL) + panic("xscale_setup_minidata: can't find L2 table for " + "VA 0x%08x", (u_int32_t) va); + pte[l2pte_index(va)] = + L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); + } + + /* + * Configure the mini-data cache for write-back with + * read/write-allocate. + * + * NOTE: In order to reconfigure the mini-data cache, we must + * make sure it contains no valid data! In order to do that, + * we must issue a global data cache invalidate command! + * + * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! + * THIS IS VERY IMPORTANT! + */ + + /* Invalidate data and mini-data. 
*/ + __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); + __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); + auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; + __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); +} +#endif + +/* + * Allocate an L1 translation table for the specified pmap. + * This is called at pmap creation time. + */ +static void +pmap_alloc_l1(pmap_t pm) +{ + struct l1_ttable *l1; + u_int8_t domain; + + /* + * Remove the L1 at the head of the LRU list + */ + mtx_lock(&l1_lru_lock); + l1 = TAILQ_FIRST(&l1_lru_list); + TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); + + /* + * Pick the first available domain number, and update + * the link to the next number. + */ + domain = l1->l1_domain_first; + l1->l1_domain_first = l1->l1_domain_free[domain]; + + /* + * If there are still free domain numbers in this L1, + * put it back on the TAIL of the LRU list. + */ + if (++l1->l1_domain_use_count < PMAP_DOMAINS) + TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); + + mtx_unlock(&l1_lru_lock); + + /* + * Fix up the relevant bits in the pmap structure + */ + pm->pm_l1 = l1; + pm->pm_domain = domain + 1; +} + +/* + * Free an L1 translation table. + * This is called at pmap destruction time. + */ +static void +pmap_free_l1(pmap_t pm) +{ + struct l1_ttable *l1 = pm->pm_l1; + + mtx_lock(&l1_lru_lock); + + /* + * If this L1 is currently on the LRU list, remove it. + */ + if (l1->l1_domain_use_count < PMAP_DOMAINS) + TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); + + /* + * Free up the domain number which was allocated to the pmap + */ + l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first; + l1->l1_domain_first = pm->pm_domain - 1; + l1->l1_domain_use_count--; + + /* + * The L1 now must have at least 1 free domain, so add + * it back to the LRU list. If the use count is zero, + * put it at the head of the list, otherwise it goes + * to the tail. 
+ */ + if (l1->l1_domain_use_count == 0) { + TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); + } else + TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); + + mtx_unlock(&l1_lru_lock); +} + +/* + * Returns a pointer to the L2 bucket associated with the specified pmap + * and VA, or NULL if no L2 bucket exists for the address. + */ +static PMAP_INLINE struct l2_bucket * +pmap_get_l2_bucket(pmap_t pm, vm_offset_t va) +{ + struct l2_dtable *l2; + struct l2_bucket *l2b; + u_short l1idx; + + l1idx = L1_IDX(va); + + if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || + (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) + return (NULL); + + return (l2b); +} + +/* + * Returns a pointer to the L2 bucket associated with the specified pmap + * and VA. + * + * If no L2 bucket exists, perform the necessary allocations to put an L2 + * bucket/page table in place. + * + * Note that if a new L2 bucket/page was allocated, the caller *must* + * increment the bucket occupancy counter appropriately *before* + * releasing the pmap's lock to ensure no other thread or cpu deallocates + * the bucket/page in the meantime. + */ +static struct l2_bucket * +pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) +{ + struct l2_dtable *l2; + struct l2_bucket *l2b; + u_short l1idx; + + l1idx = L1_IDX(va); + + PMAP_ASSERT_LOCKED(pm); + rw_assert(&pvh_global_lock, RA_WLOCKED); + if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { + /* + * No mapping at this address, as there is + * no entry in the L1 table. + * Need to allocate a new l2_dtable. + */ + PMAP_UNLOCK(pm); + rw_wunlock(&pvh_global_lock); + if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) { + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + return (NULL); + } + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { + /* + * Someone already allocated the l2_dtable while + * we were doing the same. 
+ */ + uma_zfree(l2table_zone, l2); + l2 = pm->pm_l2[L2_IDX(l1idx)]; + } else { + bzero(l2, sizeof(*l2)); + /* + * Link it into the parent pmap + */ + pm->pm_l2[L2_IDX(l1idx)] = l2; + } + } + + l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; + + /* + * Fetch pointer to the L2 page table associated with the address. + */ + if (l2b->l2b_kva == NULL) { + pt_entry_t *ptep; + + /* + * No L2 page table has been allocated. Chances are, this + * is because we just allocated the l2_dtable, above. + */ + l2->l2_occupancy++; + PMAP_UNLOCK(pm); + rw_wunlock(&pvh_global_lock); + ptep = uma_zalloc(l2zone, M_NOWAIT); + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + if (l2b->l2b_kva != 0) { + /* We lost the race. */ + l2->l2_occupancy--; + uma_zfree(l2zone, ptep); + return (l2b); + } + l2b->l2b_phys = vtophys(ptep); + if (ptep == NULL) { + /* + * Oops, no more L2 page tables available at this + * time. We may need to deallocate the l2_dtable + * if we allocated a new one above. + */ + l2->l2_occupancy--; + if (l2->l2_occupancy == 0) { + pm->pm_l2[L2_IDX(l1idx)] = NULL; + uma_zfree(l2table_zone, l2); + } + return (NULL); + } + + l2b->l2b_kva = ptep; + l2b->l2b_l1idx = l1idx; + } + + return (l2b); +} + +static PMAP_INLINE void +#ifndef PMAP_INCLUDE_PTE_SYNC +pmap_free_l2_ptp(pt_entry_t *l2) +#else +pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) +#endif +{ +#ifdef PMAP_INCLUDE_PTE_SYNC + /* + * Note: With a write-back cache, we may need to sync this + * L2 table before re-using it. + * This is because it may have belonged to a non-current + * pmap, in which case the cache syncs would have been + * skipped when the pages were being unmapped. If the + * L2 table were then to be immediately re-allocated to + * the *current* pmap, it may well contain stale mappings + * which have not yet been cleared by a cache write-back + * and so would still be visible to the mmu. 
+ */ + if (need_sync) + PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); +#endif + uma_zfree(l2zone, l2); +} +/* + * One or more mappings in the specified L2 descriptor table have just been + * invalidated. + * + * Garbage collect the metadata and descriptor table itself if necessary. + * + * The pmap lock must be acquired when this is called (not necessary + * for the kernel pmap). + */ +static void +pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) +{ + struct l2_dtable *l2; + pd_entry_t *pl1pd, l1pd; + pt_entry_t *ptep; + u_short l1idx; + + + /* + * Update the bucket's reference count according to how many + * PTEs the caller has just invalidated. + */ + l2b->l2b_occupancy -= count; + + /* + * Note: + * + * Level 2 page tables allocated to the kernel pmap are never freed + * as that would require checking all Level 1 page tables and + * removing any references to the Level 2 page table. See also the + * comment elsewhere about never freeing bootstrap L2 descriptors. + * + * We make do with just invalidating the mapping in the L2 table. + * + * This isn't really a big deal in practice and, in fact, leads + * to a performance win over time as we don't need to continually + * alloc/free. + */ + if (l2b->l2b_occupancy > 0 || pm == kernel_pmap) + return; + + /* + * There are no more valid mappings in this level 2 page table. + * Go ahead and NULL-out the pointer in the bucket, then + * free the page table. + */ + l1idx = l2b->l2b_l1idx; + ptep = l2b->l2b_kva; + l2b->l2b_kva = NULL; + + pl1pd = &pm->pm_l1->l1_kva[l1idx]; + + /* + * If the L1 slot matches the pmap's domain + * number, then invalidate it. + */ + l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); + if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { + *pl1pd = 0; + PTE_SYNC(pl1pd); + } + + /* + * Release the L2 descriptor table back to the pool cache. 
+ */ +#ifndef PMAP_INCLUDE_PTE_SYNC + pmap_free_l2_ptp(ptep); +#else + pmap_free_l2_ptp(!pmap_is_current(pm), ptep); +#endif + + /* + * Update the reference count in the associated l2_dtable + */ + l2 = pm->pm_l2[L2_IDX(l1idx)]; + if (--l2->l2_occupancy > 0) + return; + + /* + * There are no more valid mappings in any of the Level 1 + * slots managed by this l2_dtable. Go ahead and NULL-out + * the pointer in the parent pmap and free the l2_dtable. + */ + pm->pm_l2[L2_IDX(l1idx)] = NULL; + uma_zfree(l2table_zone, l2); +} + +/* + * Pool cache constructors for L2 descriptor tables, metadata and pmap + * structures. + */ +static int +pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) +{ +#ifndef PMAP_INCLUDE_PTE_SYNC + struct l2_bucket *l2b; + pt_entry_t *ptep, pte; + + vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK; + + /* + * The mappings for these page tables were initially made using + * pmap_kenter() by the pool subsystem. Therefore, the cache- + * mode will not be right for page table mappings. To avoid + * polluting the pmap_kenter() code with a special case for + * page tables, we simply fix up the cache-mode here if it's not + * correct. + */ + l2b = pmap_get_l2_bucket(kernel_pmap, va); + ptep = &l2b->l2b_kva[l2pte_index(va)]; + pte = *ptep; + + if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { + /* + * Page tables must have the cache-mode set to + * Write-Thru. + */ + *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; + PTE_SYNC(ptep); + cpu_tlb_flushD_SE(va); + cpu_cpwait(); + } +#endif + memset(mem, 0, L2_TABLE_SIZE_REAL); + PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); + return (0); +} + +/* + * A bunch of routines to conditionally flush the caches/TLB depending + * on whether the specified pmap actually needs to be flushed at any + * given time. 
+ */ +static PMAP_INLINE void +pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va) +{ + + if (pmap_is_current(pm)) + cpu_tlb_flushID_SE(va); +} + +static PMAP_INLINE void +pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va) +{ + + if (pmap_is_current(pm)) + cpu_tlb_flushD_SE(va); +} + +static PMAP_INLINE void +pmap_tlb_flushID(pmap_t pm) +{ + + if (pmap_is_current(pm)) + cpu_tlb_flushID(); +} +static PMAP_INLINE void +pmap_tlb_flushD(pmap_t pm) +{ + + if (pmap_is_current(pm)) + cpu_tlb_flushD(); +} + +static int +pmap_has_valid_mapping(pmap_t pm, vm_offset_t va) +{ + pd_entry_t *pde; + pt_entry_t *ptep; + + if (pmap_get_pde_pte(pm, va, &pde, &ptep) && + ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV)) + return (1); + + return (0); +} + +static PMAP_INLINE void +pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) +{ + vm_size_t rest; + + CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x" + " len 0x%x ", pm, pm == kernel_pmap, va, len); + + if (pmap_is_current(pm) || pm == kernel_pmap) { + rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); + while (len > 0) { + if (pmap_has_valid_mapping(pm, va)) { + cpu_idcache_wbinv_range(va, rest); + cpu_l2cache_wbinv_range(va, rest); + } + len -= rest; + va += rest; + rest = MIN(PAGE_SIZE, len); + } + } +} + +static PMAP_INLINE void +pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, + boolean_t rd_only) +{ + vm_size_t rest; + + CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x " + "len 0x%x ", pm, pm == kernel_pmap, va, len); + CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only); + + if (pmap_is_current(pm)) { + rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); + while (len > 0) { + if (pmap_has_valid_mapping(pm, va)) { + if (do_inv && rd_only) { + cpu_dcache_inv_range(va, rest); + cpu_l2cache_inv_range(va, rest); + } else if (do_inv) { + cpu_dcache_wbinv_range(va, rest); + cpu_l2cache_wbinv_range(va, rest); + } else if (!rd_only) { + 
cpu_dcache_wb_range(va, rest); + cpu_l2cache_wb_range(va, rest); + } + } + len -= rest; + va += rest; + + rest = MIN(PAGE_SIZE, len); + } + } +} + +static PMAP_INLINE void +pmap_idcache_wbinv_all(pmap_t pm) +{ + + if (pmap_is_current(pm)) { + cpu_idcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + } +} + +#ifdef notyet +static PMAP_INLINE void +pmap_dcache_wbinv_all(pmap_t pm) +{ + + if (pmap_is_current(pm)) { + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + } +} +#endif + +/* + * PTE_SYNC_CURRENT: + * + * Make sure the pte is written out to RAM. + * We need to do this for one of two cases: + * - We're dealing with the kernel pmap + * - There is no pmap active in the cache/tlb. + * - The specified pmap is 'active' in the cache/tlb. + */ +#ifdef PMAP_INCLUDE_PTE_SYNC +#define PTE_SYNC_CURRENT(pm, ptep) \ +do { \ + if (PMAP_NEEDS_PTE_SYNC && \ + pmap_is_current(pm)) \ + PTE_SYNC(ptep); \ +} while (/*CONSTCOND*/0) +#else +#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ +#endif + +/* + * cacheable == -1 means we must make the entry uncacheable, 1 means + * cacheable; + */ +static __inline void +pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep, pte; + + l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); + ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; + + if (cacheable == 1) { + pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; + if (l2pte_valid(pte)) { + if (PV_BEEN_EXECD(pv->pv_flags)) { + pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); + } else if (PV_BEEN_REFD(pv->pv_flags)) { + pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); + } + } + } else { + pte = *ptep &~ L2_S_CACHE_MASK; + if ((va != pv->pv_va || pm != pv->pv_pmap) && + l2pte_valid(pte)) { + if (PV_BEEN_EXECD(pv->pv_flags)) { + pmap_idcache_wbinv_range(pv->pv_pmap, + pv->pv_va, PAGE_SIZE); + pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); + } else if (PV_BEEN_REFD(pv->pv_flags)) { + pmap_dcache_wb_range(pv->pv_pmap, + pv->pv_va, 
PAGE_SIZE, TRUE, + (pv->pv_flags & PVF_WRITE) == 0); + pmap_tlb_flushD_SE(pv->pv_pmap, + pv->pv_va); + } + } + } + *ptep = pte; + PTE_SYNC_CURRENT(pv->pv_pmap, ptep); +} + +static void +pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) +{ + int pmwc = 0; + int writable = 0, kwritable = 0, uwritable = 0; + int entries = 0, kentries = 0, uentries = 0; + struct pv_entry *pv; + + rw_assert(&pvh_global_lock, RA_WLOCKED); + + /* the cache gets written back/invalidated on context switch. + * therefore, if a user page shares an entry in the same page or + * with the kernel map and at least one is writable, then the + * cache entry must be set write-through. + */ + + TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { + /* generate a count of the pv_entry uses */ + if (pv->pv_flags & PVF_WRITE) { + if (pv->pv_pmap == kernel_pmap) + kwritable++; + else if (pv->pv_pmap == pm) + uwritable++; + writable++; + } + if (pv->pv_pmap == kernel_pmap) + kentries++; + else { + if (pv->pv_pmap == pm) + uentries++; + entries++; + } + } + /* + * check if the user duplicate mapping has + * been removed. 
+ */ + if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) || + (uwritable > 1))) + pmwc = 1; + + TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { + /* check for user uncachable conditions - order is important */ + if (pm != kernel_pmap && + (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) { + + if ((uentries > 1 && uwritable) || uwritable > 1) { + + /* user duplicate mapping */ + if (pv->pv_pmap != kernel_pmap) + pv->pv_flags |= PVF_MWC; + + if (!(pv->pv_flags & PVF_NC)) { + pv->pv_flags |= PVF_NC; + pmap_set_cache_entry(pv, pm, va, -1); + } + continue; + } else /* no longer a duplicate user */ + pv->pv_flags &= ~PVF_MWC; + } + + /* + * check for kernel uncachable conditions + * kernel writable or kernel readable with writable user entry + */ + if ((kwritable && (entries || kentries > 1)) || + (kwritable > 1) || + ((kwritable != writable) && kentries && + (pv->pv_pmap == kernel_pmap || + (pv->pv_flags & PVF_WRITE) || + (pv->pv_flags & PVF_MWC)))) { + + if (!(pv->pv_flags & PVF_NC)) { + pv->pv_flags |= PVF_NC; + pmap_set_cache_entry(pv, pm, va, -1); + } + continue; + } + + /* kernel and user are cachable */ + if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) && + (pv->pv_flags & PVF_NC)) { + + pv->pv_flags &= ~PVF_NC; + if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) + pmap_set_cache_entry(pv, pm, va, 1); + continue; + } + /* user is no longer sharable and writable */ + if (pm != kernel_pmap && + (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) && + !pmwc && (pv->pv_flags & PVF_NC)) { + + pv->pv_flags &= ~(PVF_NC | PVF_MWC); + if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) + pmap_set_cache_entry(pv, pm, va, 1); + } + } + + if ((kwritable == 0) && (writable == 0)) { + pg->md.pvh_attrs &= ~PVF_MOD; + vm_page_aflag_clear(pg, PGA_WRITEABLE); + return; + } +} + +/* + * Modify pte bits for all ptes corresponding to the given physical address. 
+ * We use `maskbits' rather than `clearbits' because we're always passing + * constants and the latter would require an extra inversion at run-time. + */ +static int +pmap_clearbit(struct vm_page *pg, u_int maskbits) +{ + struct l2_bucket *l2b; + struct pv_entry *pv; + pt_entry_t *ptep, npte, opte; + pmap_t pm; + vm_offset_t va; + u_int oflags; + int count = 0; + + rw_wlock(&pvh_global_lock); + + if (maskbits & PVF_WRITE) + maskbits |= PVF_MOD; + /* + * Clear saved attributes (modify, reference) + */ + pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); + + if (TAILQ_EMPTY(&pg->md.pv_list)) { + rw_wunlock(&pvh_global_lock); + return (0); + } + + /* + * Loop over all current mappings setting/clearing as appropos + */ + TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { + va = pv->pv_va; + pm = pv->pv_pmap; + oflags = pv->pv_flags; + + if (!(oflags & maskbits)) { + if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) { + if (pg->md.pv_memattr != + VM_MEMATTR_UNCACHEABLE) { + PMAP_LOCK(pm); + l2b = pmap_get_l2_bucket(pm, va); + ptep = &l2b->l2b_kva[l2pte_index(va)]; + *ptep |= pte_l2_s_cache_mode; + PTE_SYNC(ptep); + PMAP_UNLOCK(pm); + } + pv->pv_flags &= ~(PVF_NC | PVF_MWC); + } + continue; + } + pv->pv_flags &= ~maskbits; + + PMAP_LOCK(pm); + + l2b = pmap_get_l2_bucket(pm, va); + + ptep = &l2b->l2b_kva[l2pte_index(va)]; + npte = opte = *ptep; + + if (maskbits & (PVF_WRITE|PVF_MOD)) { + if ((pv->pv_flags & PVF_NC)) { + /* + * Entry is not cacheable: + * + * Don't turn caching on again if this is a + * modified emulation. This would be + * inconsitent with the settings created by + * pmap_fix_cache(). Otherwise, it's safe + * to re-enable cacheing. + * + * There's no need to call pmap_fix_cache() + * here: all pages are losing their write + * permission. 
+ */ + if (maskbits & PVF_WRITE) { + if (pg->md.pv_memattr != + VM_MEMATTR_UNCACHEABLE) + npte |= pte_l2_s_cache_mode; + pv->pv_flags &= ~(PVF_NC | PVF_MWC); + } + } else + if (opte & L2_S_PROT_W) { + vm_page_dirty(pg); + /* + * Entry is writable/cacheable: check if pmap + * is current if it is flush it, otherwise it + * won't be in the cache + */ + if (PV_BEEN_EXECD(oflags)) + pmap_idcache_wbinv_range(pm, pv->pv_va, + PAGE_SIZE); + else + if (PV_BEEN_REFD(oflags)) + pmap_dcache_wb_range(pm, pv->pv_va, + PAGE_SIZE, + (maskbits & PVF_REF) ? TRUE : FALSE, + FALSE); + } + + /* make the pte read only */ + npte &= ~L2_S_PROT_W; + } + + if (maskbits & PVF_REF) { + if ((pv->pv_flags & PVF_NC) == 0 && + (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { + /* + * Check npte here; we may have already + * done the wbinv above, and the validity + * of the PTE is the same for opte and + * npte. + */ + if (npte & L2_S_PROT_W) { + if (PV_BEEN_EXECD(oflags)) + pmap_idcache_wbinv_range(pm, + pv->pv_va, PAGE_SIZE); + else + if (PV_BEEN_REFD(oflags)) + pmap_dcache_wb_range(pm, + pv->pv_va, PAGE_SIZE, + TRUE, FALSE); + } else + if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { + /* XXXJRT need idcache_inv_range */ + if (PV_BEEN_EXECD(oflags)) + pmap_idcache_wbinv_range(pm, + pv->pv_va, PAGE_SIZE); + else + if (PV_BEEN_REFD(oflags)) + pmap_dcache_wb_range(pm, + pv->pv_va, PAGE_SIZE, + TRUE, TRUE); + } + } + + /* + * Make the PTE invalid so that we will take a + * page fault the next time the mapping is + * referenced. + */ + npte &= ~L2_TYPE_MASK; + npte |= L2_TYPE_INV; + } + + if (npte != opte) { + count++; + *ptep = npte; + PTE_SYNC(ptep); + /* Flush the TLB entry if a current pmap. 
*/ + if (PV_BEEN_EXECD(oflags)) + pmap_tlb_flushID_SE(pm, pv->pv_va); + else + if (PV_BEEN_REFD(oflags)) + pmap_tlb_flushD_SE(pm, pv->pv_va); + } + + PMAP_UNLOCK(pm); + + } + + if (maskbits & PVF_WRITE) + vm_page_aflag_clear(pg, PGA_WRITEABLE); + rw_wunlock(&pvh_global_lock); + return (count); +} + +/* + * main pv_entry manipulation functions: + * pmap_enter_pv: enter a mapping onto a vm_page list + * pmap_remove_pv: remove a mappiing from a vm_page list + * + * NOTE: pmap_enter_pv expects to lock the pvh itself + * pmap_remove_pv expects the caller to lock the pvh before calling + */ + +/* + * pmap_enter_pv: enter a mapping onto a vm_page's PV list + * + * => caller should hold the proper lock on pvh_global_lock + * => caller should have pmap locked + * => we will (someday) gain the lock on the vm_page's PV list + * => caller should adjust ptp's wire_count before calling + * => caller should not adjust pmap's wire_count + */ +static void +pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, + vm_offset_t va, u_int flags) +{ + + rw_assert(&pvh_global_lock, RA_WLOCKED); + PMAP_ASSERT_LOCKED(pm); + if (pg->md.pv_kva != 0) { + pve->pv_pmap = kernel_pmap; + pve->pv_va = pg->md.pv_kva; + pve->pv_flags = PVF_WRITE | PVF_UNMAN; + if (pm != kernel_pmap) + PMAP_LOCK(kernel_pmap); + TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); + TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist); + if (pm != kernel_pmap) + PMAP_UNLOCK(kernel_pmap); + pg->md.pv_kva = 0; + if ((pve = pmap_get_pv_entry()) == NULL) + panic("pmap_kenter_pv: no pv entries"); + } + pve->pv_pmap = pm; + pve->pv_va = va; + pve->pv_flags = flags; + TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); + TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist); + pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD); + if (pve->pv_flags & PVF_WIRED) + ++pm->pm_stats.wired_count; + vm_page_aflag_set(pg, PGA_REFERENCED); +} + +/* + * + * pmap_find_pv: Find a pv entry + * + * => caller should hold lock on vm_page 
+ */ +static PMAP_INLINE struct pv_entry * +pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) +{ + struct pv_entry *pv; + + rw_assert(&pvh_global_lock, RA_WLOCKED); + TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) + if (pm == pv->pv_pmap && va == pv->pv_va) + break; + return (pv); +} + +/* + * vector_page_setprot: + * + * Manipulate the protection of the vector page. + */ +void +vector_page_setprot(int prot) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep; + + l2b = pmap_get_l2_bucket(kernel_pmap, vector_page); + + ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; + + *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); + PTE_SYNC(ptep); + cpu_tlb_flushD_SE(vector_page); + cpu_cpwait(); +} + +/* + * pmap_remove_pv: try to remove a mapping from a pv_list + * + * => caller should hold proper lock on pmap_main_lock + * => pmap should be locked + * => caller should hold lock on vm_page [so that attrs can be adjusted] + * => caller should adjust ptp's wire_count and free PTP if needed + * => caller should NOT adjust pmap's wire_count + * => we return the removed pve + */ + +static void +pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) +{ + + struct pv_entry *pv; + rw_assert(&pvh_global_lock, RA_WLOCKED); + PMAP_ASSERT_LOCKED(pm); + TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); + TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); + if (pve->pv_flags & PVF_WIRED) + --pm->pm_stats.wired_count; + if (pg->md.pvh_attrs & PVF_MOD) + vm_page_dirty(pg); + if (TAILQ_FIRST(&pg->md.pv_list) == NULL) + pg->md.pvh_attrs &= ~PVF_REF; + else + vm_page_aflag_set(pg, PGA_REFERENCED); + if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) || + (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC))) + pmap_fix_cache(pg, pm, 0); + else if (pve->pv_flags & PVF_WRITE) { + TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list) + if (pve->pv_flags & PVF_WRITE) + break; + if (!pve) { + pg->md.pvh_attrs &= ~PVF_MOD; + vm_page_aflag_clear(pg, PGA_WRITEABLE); + } + } + pv = 
TAILQ_FIRST(&pg->md.pv_list); + if (pv != NULL && (pv->pv_flags & PVF_UNMAN) && + TAILQ_NEXT(pv, pv_list) == NULL) { + pm = kernel_pmap; + pg->md.pv_kva = pv->pv_va; + /* a recursive pmap_nuke_pv */ + TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list); + TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist); + if (pv->pv_flags & PVF_WIRED) + --pm->pm_stats.wired_count; + pg->md.pvh_attrs &= ~PVF_REF; + pg->md.pvh_attrs &= ~PVF_MOD; + vm_page_aflag_clear(pg, PGA_WRITEABLE); + pmap_free_pv_entry(pv); + } +} + +static struct pv_entry * +pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) +{ + struct pv_entry *pve; + + rw_assert(&pvh_global_lock, RA_WLOCKED); + pve = TAILQ_FIRST(&pg->md.pv_list); + + while (pve) { + if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ + pmap_nuke_pv(pg, pm, pve); + break; + } + pve = TAILQ_NEXT(pve, pv_list); + } + + if (pve == NULL && pg->md.pv_kva == va) + pg->md.pv_kva = 0; + + return(pve); /* return removed pve */ +} +/* + * + * pmap_modify_pv: Update pv flags + * + * => caller should hold lock on vm_page [so that attrs can be adjusted] + * => caller should NOT adjust pmap's wire_count + * => we return the old flags + * + * Modify a physical-virtual mapping in the pv table + */ +static u_int +pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, + u_int clr_mask, u_int set_mask) +{ + struct pv_entry *npv; + u_int flags, oflags; + + PMAP_ASSERT_LOCKED(pm); + rw_assert(&pvh_global_lock, RA_WLOCKED); + if ((npv = pmap_find_pv(pg, pm, va)) == NULL) + return (0); + + /* + * There is at least one VA mapping this page. 
+ */ + + if (clr_mask & (PVF_REF | PVF_MOD)) + pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); + + oflags = npv->pv_flags; + npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; + + if ((flags ^ oflags) & PVF_WIRED) { + if (flags & PVF_WIRED) + ++pm->pm_stats.wired_count; + else + --pm->pm_stats.wired_count; + } + + if ((flags ^ oflags) & PVF_WRITE) + pmap_fix_cache(pg, pm, 0); + + return (oflags); +} + +/* Function to set the debug level of the pmap code */ +#ifdef PMAP_DEBUG +void +pmap_debug(int level) +{ + pmap_debug_level = level; + dprintf("pmap_debug: level=%d\n", pmap_debug_level); +} +#endif /* PMAP_DEBUG */ + +void +pmap_pinit0(struct pmap *pmap) +{ + PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); + + bcopy(kernel_pmap, pmap, sizeof(*pmap)); + bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx)); + PMAP_LOCK_INIT(pmap); +} + +/* + * Initialize a vm_page's machine-dependent fields. + */ +void +pmap_page_init(vm_page_t m) +{ + + TAILQ_INIT(&m->md.pv_list); + m->md.pv_memattr = VM_MEMATTR_DEFAULT; +} + +/* + * Initialize the pmap module. + * Called by vm_init, to initialize any structures that the pmap + * system needs to map virtual memory. + */ +void +pmap_init(void) +{ + int shpgperproc = PMAP_SHPGPERPROC; + + l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, + NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); + l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL, + NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); + + /* + * Initialize the PV entry allocator. + */ + pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, + NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); + TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); + pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; + uma_zone_reserve_kva(pvzone, pv_entry_max); + pv_entry_high_water = 9 * (pv_entry_max / 10); + + /* + * Now it is safe to enable pv_table recording. 
+ */ + PDEBUG(1, printf("pmap_init: done!\n")); +} + +int +pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) +{ + struct l2_dtable *l2; + struct l2_bucket *l2b; + pd_entry_t *pl1pd, l1pd; + pt_entry_t *ptep, pte; + vm_paddr_t pa; + u_int l1idx; + int rv = 0; + + l1idx = L1_IDX(va); + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + + /* + * If there is no l2_dtable for this address, then the process + * has no business accessing it. + * + * Note: This will catch userland processes trying to access + * kernel addresses. + */ + l2 = pm->pm_l2[L2_IDX(l1idx)]; + if (l2 == NULL) + goto out; + + /* + * Likewise if there is no L2 descriptor table + */ + l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; + if (l2b->l2b_kva == NULL) + goto out; + + /* + * Check the PTE itself. + */ + ptep = &l2b->l2b_kva[l2pte_index(va)]; + pte = *ptep; + if (pte == 0) + goto out; + + /* + * Catch a userland access to the vector page mapped at 0x0 + */ + if (user && (pte & L2_S_PROT_U) == 0) + goto out; + if (va == vector_page) + goto out; + + pa = l2pte_pa(pte); + + if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { + /* + * This looks like a good candidate for "page modified" + * emulation... + */ + struct pv_entry *pv; + struct vm_page *pg; + + /* Extract the physical address of the page */ + if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { + goto out; + } + /* Get the current flags for this page. */ + + pv = pmap_find_pv(pg, pm, va); + if (pv == NULL) { + goto out; + } + + /* + * Do the flags say this page is writable? If not then it + * is a genuine write fault. If yes then the write fault is + * our fault as we did not reflect the write access in the + * PTE. Now we know a write has occurred we can correct this + * and also set the modified bit + */ + if ((pv->pv_flags & PVF_WRITE) == 0) { + goto out; + } + + pg->md.pvh_attrs |= PVF_REF | PVF_MOD; + vm_page_dirty(pg); + pv->pv_flags |= PVF_REF | PVF_MOD; + + /* + * Re-enable write permissions for the page. 
No need to call + * pmap_fix_cache(), since this is just a + * modified-emulation fault, and the PVF_WRITE bit isn't + * changing. We've already set the cacheable bits based on + * the assumption that we can write to this page. + */ + *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; + PTE_SYNC(ptep); + rv = 1; + } else + if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { + /* + * This looks like a good candidate for "page referenced" + * emulation. + */ + struct pv_entry *pv; + struct vm_page *pg; + + /* Extract the physical address of the page */ + if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) + goto out; + /* Get the current flags for this page. */ + + pv = pmap_find_pv(pg, pm, va); + if (pv == NULL) + goto out; + + pg->md.pvh_attrs |= PVF_REF; + pv->pv_flags |= PVF_REF; + + + *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; + PTE_SYNC(ptep); + rv = 1; + } + + /* + * We know there is a valid mapping here, so simply + * fix up the L1 if necessary. + */ + pl1pd = &pm->pm_l1->l1_kva[l1idx]; + l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; + if (*pl1pd != l1pd) { + *pl1pd = l1pd; + PTE_SYNC(pl1pd); + rv = 1; + } + +#ifdef DEBUG + /* + * If 'rv == 0' at this point, it generally indicates that there is a + * stale TLB entry for the faulting address. This happens when two or + * more processes are sharing an L1. Since we don't flush the TLB on + * a context switch between such processes, we can take domain faults + * for mappings which exist at the same VA in both processes. EVEN IF + * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for + * example. + * + * This is extremely likely to happen if pmap_enter() updated the L1 + * entry for a recently entered mapping. In this case, the TLB is + * flushed for the new mapping, but there may still be TLB entries for + * other mappings belonging to other processes in the 1MB range + * covered by the L1 entry. 
+ * + * Since 'rv == 0', we know that the L1 already contains the correct + * value, so the fault must be due to a stale TLB entry. + * + * Since we always need to flush the TLB anyway in the case where we + * fixed up the L1, or frobbed the L2 PTE, we effectively deal with + * stale TLB entries dynamically. + * + * However, the above condition can ONLY happen if the current L1 is + * being shared. If it happens when the L1 is unshared, it indicates + * that other parts of the pmap are not doing their job WRT managing + * the TLB. + */ + if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { + printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", + pm, (u_long)va, ftype); + printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", + l2, l2b, ptep, pl1pd); + printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", + pte, l1pd, last_fault_code); +#ifdef DDB + Debugger(); +#endif + } +#endif + + cpu_tlb_flushID_SE(va); + cpu_cpwait(); + + rv = 1; + +out: + rw_wunlock(&pvh_global_lock); + PMAP_UNLOCK(pm); + return (rv); +} + +void +pmap_postinit(void) +{ + struct l2_bucket *l2b; + struct l1_ttable *l1; + pd_entry_t *pl1pt; + pt_entry_t *ptep, pte; + vm_offset_t va, eva; + u_int loop, needed; + + needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 
1 : 0); + needed -= 1; + l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); + + for (loop = 0; loop < needed; loop++, l1++) { + /* Allocate a L1 page table */ + va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, + 0xffffffff, L1_TABLE_SIZE, 0); + + if (va == 0) + panic("Cannot allocate L1 KVM"); + + eva = va + L1_TABLE_SIZE; + pl1pt = (pd_entry_t *)va; + + while (va < eva) { + l2b = pmap_get_l2_bucket(kernel_pmap, va); + ptep = &l2b->l2b_kva[l2pte_index(va)]; + pte = *ptep; + pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; + *ptep = pte; + PTE_SYNC(ptep); + cpu_tlb_flushD_SE(va); + + va += PAGE_SIZE; + } + pmap_init_l1(l1, pl1pt); + } + + +#ifdef DEBUG + printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", + needed); +#endif +} + +/* + * This is used to stuff certain critical values into the PCB where they + * can be accessed quickly from cpu_switch() et al. + */ +void +pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) +{ + struct l2_bucket *l2b; + + pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; + pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | + (DOMAIN_CLIENT << (pm->pm_domain * 2)); + + if (vector_page < KERNBASE) { + pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; + l2b = pmap_get_l2_bucket(pm, vector_page); + pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | + L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); + } else + pcb->pcb_pl1vec = NULL; +} + +void +pmap_activate(struct thread *td) +{ + pmap_t pm; + struct pcb *pcb; + + pm = vmspace_pmap(td->td_proc->p_vmspace); + pcb = td->td_pcb; + + critical_enter(); + pmap_set_pcb_pagedir(pm, pcb); + + if (td == curthread) { + u_int cur_dacr, cur_ttb; + + __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); + __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); + + cur_ttb &= ~(L1_TABLE_SIZE - 1); + + if (cur_ttb == (u_int)pcb->pcb_pagedir && + cur_dacr == pcb->pcb_dacr) { + /* + * No need to switch address spaces. 
+ */ + critical_exit(); + return; + } + + + /* + * We MUST, I repeat, MUST fix up the L1 entry corresponding + * to 'vector_page' in the incoming L1 table before switching + * to it otherwise subsequent interrupts/exceptions (including + * domain faults!) will jump into hyperspace. + */ + if (pcb->pcb_pl1vec) { + + *pcb->pcb_pl1vec = pcb->pcb_l1vec; + /* + * Don't need to PTE_SYNC() at this point since + * cpu_setttb() is about to flush both the cache + * and the TLB. + */ + } + + cpu_domains(pcb->pcb_dacr); + cpu_setttb(pcb->pcb_pagedir); + } + critical_exit(); +} + +static int +pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) +{ + pd_entry_t *pdep, pde; + pt_entry_t *ptep, pte; + vm_offset_t pa; + int rv = 0; + + /* + * Make sure the descriptor itself has the correct cache mode + */ + pdep = &kl1[L1_IDX(va)]; + pde = *pdep; + + if (l1pte_section_p(pde)) { + if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { + *pdep = (pde & ~L1_S_CACHE_MASK) | + pte_l1_s_cache_mode_pt; + PTE_SYNC(pdep); + cpu_dcache_wbinv_range((vm_offset_t)pdep, + sizeof(*pdep)); + cpu_l2cache_wbinv_range((vm_offset_t)pdep, + sizeof(*pdep)); + rv = 1; + } + } else { + pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); + ptep = (pt_entry_t *)kernel_pt_lookup(pa); + if (ptep == NULL) + panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); + + ptep = &ptep[l2pte_index(va)]; + pte = *ptep; + if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { + *ptep = (pte & ~L2_S_CACHE_MASK) | + pte_l2_s_cache_mode_pt; + PTE_SYNC(ptep); + cpu_dcache_wbinv_range((vm_offset_t)ptep, + sizeof(*ptep)); + cpu_l2cache_wbinv_range((vm_offset_t)ptep, + sizeof(*ptep)); + rv = 1; + } + } + + return (rv); +} + +static void +pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, + pt_entry_t **ptep) +{ + vm_offset_t va = *availp; + struct l2_bucket *l2b; + + if (ptep) { + l2b = pmap_get_l2_bucket(kernel_pmap, va); + if (l2b == NULL) + panic("pmap_alloc_specials: no l2b for 0x%x", va); + + *ptep = 
&l2b->l2b_kva[l2pte_index(va)]; + } + + *vap = va; + *availp = va + (PAGE_SIZE * pages); +} + +/* + * Bootstrap the system enough to run with virtual memory. + * + * On the arm this is called after mapping has already been enabled + * and just syncs the pmap module with what has already been done. + * [We can't call it easily with mapping off since the kernel is not + * mapped with PA == VA, hence we would have to relocate every address + * from the linked base (virtual) address "KERNBASE" to the actual + * (physical) address starting relative to 0] + */ +#define PMAP_STATIC_L2_SIZE 16 +void +pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt) +{ + static struct l1_ttable static_l1; + static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; + struct l1_ttable *l1 = &static_l1; + struct l2_dtable *l2; + struct l2_bucket *l2b; + pd_entry_t pde; + pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; + pt_entry_t *ptep; + pt_entry_t *qmap_pte; + vm_paddr_t pa; + vm_offset_t va; + vm_size_t size; + int l1idx, l2idx, l2next = 0; + + PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", + firstaddr, vm_max_kernel_address)); + + virtual_avail = firstaddr; + kernel_pmap->pm_l1 = l1; + kernel_l1pa = l1pt->pv_pa; + + /* + * Scan the L1 translation table created by initarm() and create + * the required metadata for all valid mappings found in it. + */ + for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { + pde = kernel_l1pt[l1idx]; + + /* + * We're only interested in Coarse mappings. + * pmap_extract() can deal with section mappings without + * recourse to checking L2 metadata. 
+ */ + if ((pde & L1_TYPE_MASK) != L1_TYPE_C) + continue; + + /* + * Lookup the KVA of this L2 descriptor table + */ + pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); + ptep = (pt_entry_t *)kernel_pt_lookup(pa); + + if (ptep == NULL) { + panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", + (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); + } + + /* + * Fetch the associated L2 metadata structure. + * Allocate a new one if necessary. + */ + if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { + if (l2next == PMAP_STATIC_L2_SIZE) + panic("pmap_bootstrap: out of static L2s"); + kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = + &static_l2[l2next++]; + } + + /* + * One more L1 slot tracked... + */ + l2->l2_occupancy++; + + /* + * Fill in the details of the L2 descriptor in the + * appropriate bucket. + */ + l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; + l2b->l2b_kva = ptep; + l2b->l2b_phys = pa; + l2b->l2b_l1idx = l1idx; + + /* + * Establish an initial occupancy count for this descriptor + */ + for (l2idx = 0; + l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); + l2idx++) { + if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { + l2b->l2b_occupancy++; + } + } + + /* + * Make sure the descriptor itself has the correct cache mode. + * If not, fix it, but whine about the problem. Port-meisters + * should consider this a clue to fix up their initarm() + * function. :) + */ + if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { + printf("pmap_bootstrap: WARNING! wrong cache mode for " + "L2 pte @ %p\n", ptep); + } + } + + + /* + * Ensure the primary (kernel) L1 has the correct cache mode for + * a page table. Bitch if it is not correctly set. + */ + for (va = (vm_offset_t)kernel_l1pt; + va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { + if (pmap_set_pt_cache_mode(kernel_l1pt, va)) + printf("pmap_bootstrap: WARNING! 
wrong cache mode for " + "primary L1 @ 0x%x\n", va); + } + + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + cpu_tlb_flushID(); + cpu_cpwait(); + + PMAP_LOCK_INIT(kernel_pmap); + CPU_FILL(&kernel_pmap->pm_active); + kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; + TAILQ_INIT(&kernel_pmap->pm_pvlist); + + /* + * Initialize the global pv list lock. + */ + rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE); + + /* + * Reserve some special page table entries/VA space for temporary + * mapping of pages. + */ + pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); + pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); + pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); + pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); + pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte); + pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte); + size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) / + L1_S_SIZE; + pmap_alloc_specials(&virtual_avail, + round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, + &pmap_kernel_l2ptp_kva, NULL); + + size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; + pmap_alloc_specials(&virtual_avail, + round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, + &pmap_kernel_l2dtable_kva, NULL); + + pmap_alloc_specials(&virtual_avail, + 1, (vm_offset_t*)&_tmppt, NULL); + pmap_alloc_specials(&virtual_avail, + MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL); + SLIST_INIT(&l1_list); + TAILQ_INIT(&l1_lru_list); + mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); + pmap_init_l1(l1, kernel_l1pt); + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + + virtual_avail = round_page(virtual_avail); + virtual_end = vm_max_kernel_address; + kernel_vm_end = pmap_curmaxkvaddr; + mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF); + mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF); + + pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb); +} + 
+/*************************************************** + * Pmap allocation/deallocation routines. + ***************************************************/ + +/* + * Release any resources held by the given physical map. + * Called when a pmap initialized by pmap_pinit is being released. + * Should only be called if the map contains no valid mappings. + */ +void +pmap_release(pmap_t pmap) +{ + struct pcb *pcb; + + pmap_idcache_wbinv_all(pmap); + cpu_l2cache_wbinv_all(); + pmap_tlb_flushID(pmap); + cpu_cpwait(); + if (vector_page < KERNBASE) { + struct pcb *curpcb = PCPU_GET(curpcb); + pcb = thread0.td_pcb; + if (pmap_is_current(pmap)) { + /* + * Frob the L1 entry corresponding to the vector + * page so that it contains the kernel pmap's domain + * number. This will ensure pmap_remove() does not + * pull the current vector page out from under us. + */ + critical_enter(); + *pcb->pcb_pl1vec = pcb->pcb_l1vec; + cpu_domains(pcb->pcb_dacr); + cpu_setttb(pcb->pcb_pagedir); + critical_exit(); + } + pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); + /* + * Make sure cpu_switch(), et al, DTRT. This is safe to do + * since this process has no remaining mappings of its own. 
+ */ + curpcb->pcb_pl1vec = pcb->pcb_pl1vec; + curpcb->pcb_l1vec = pcb->pcb_l1vec; + curpcb->pcb_dacr = pcb->pcb_dacr; + curpcb->pcb_pagedir = pcb->pcb_pagedir; + + } + pmap_free_l1(pmap); + + dprintf("pmap_release()\n"); +} + + + +/* + * Helper function for pmap_grow_l2_bucket() + */ +static __inline int +pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep; + vm_paddr_t pa; + struct vm_page *pg; + + pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); + if (pg == NULL) + return (1); + pa = VM_PAGE_TO_PHYS(pg); + + if (pap) + *pap = pa; + + l2b = pmap_get_l2_bucket(kernel_pmap, va); + + ptep = &l2b->l2b_kva[l2pte_index(va)]; + *ptep = L2_S_PROTO | pa | cache_mode | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); + PTE_SYNC(ptep); + return (0); +} + +/* + * This is the same as pmap_alloc_l2_bucket(), except that it is only + * used by pmap_growkernel(). + */ +static __inline struct l2_bucket * +pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) +{ + struct l2_dtable *l2; + struct l2_bucket *l2b; + struct l1_ttable *l1; + pd_entry_t *pl1pd; + u_short l1idx; + vm_offset_t nva; + + l1idx = L1_IDX(va); + + if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { + /* + * No mapping at this address, as there is + * no entry in the L1 table. + * Need to allocate a new l2_dtable. + */ + nva = pmap_kernel_l2dtable_kva; + if ((nva & PAGE_MASK) == 0) { + /* + * Need to allocate a backing page + */ + if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) + return (NULL); + } + + l2 = (struct l2_dtable *)nva; + nva += sizeof(struct l2_dtable); + + if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & + PAGE_MASK)) { + /* + * The new l2_dtable straddles a page boundary. + * Map in another page to cover it. 
+ */ + if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) + return (NULL); + } + + pmap_kernel_l2dtable_kva = nva; + + /* + * Link it into the parent pmap + */ + pm->pm_l2[L2_IDX(l1idx)] = l2; + memset(l2, 0, sizeof(*l2)); + } + + l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; + + /* + * Fetch pointer to the L2 page table associated with the address. + */ + if (l2b->l2b_kva == NULL) { + pt_entry_t *ptep; + + /* + * No L2 page table has been allocated. Chances are, this + * is because we just allocated the l2_dtable, above. + */ + nva = pmap_kernel_l2ptp_kva; + ptep = (pt_entry_t *)nva; + if ((nva & PAGE_MASK) == 0) { + /* + * Need to allocate a backing page + */ + if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, + &pmap_kernel_l2ptp_phys)) + return (NULL); + PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); + } + memset(ptep, 0, L2_TABLE_SIZE_REAL); + l2->l2_occupancy++; + l2b->l2b_kva = ptep; + l2b->l2b_l1idx = l1idx; + l2b->l2b_phys = pmap_kernel_l2ptp_phys; + + pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; + pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; + } + + /* Distribute new L1 entry to all other L1s */ + SLIST_FOREACH(l1, &l1_list, l1_link) { + pl1pd = &l1->l1_kva[L1_IDX(va)]; + *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | + L1_C_PROTO; + PTE_SYNC(pl1pd); + } + + return (l2b); +} + + +/* + * grow the number of kernel page table entries, if needed + */ +void +pmap_growkernel(vm_offset_t addr) +{ + pmap_t kpm = kernel_pmap; + + if (addr <= pmap_curmaxkvaddr) + return; /* we are OK */ + + /* + * whoops! 
we need to add kernel PTPs + */ + + /* Map 1MB at a time */ + for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) + pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); + + /* + * flush out the cache, expensive but growkernel will happen so + * rarely + */ + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + cpu_tlb_flushD(); + cpu_cpwait(); + kernel_vm_end = pmap_curmaxkvaddr; +} + + +/* + * Remove all pages from specified address space + * this aids process exit speeds. Also, this code + * is special cased for current process only, but + * can have the more generic (and slightly slower) + * mode enabled. This is much faster than pmap_remove + * in the case of running down an entire address space. + */ +void +pmap_remove_pages(pmap_t pmap) +{ + struct pv_entry *pv, *npv; + struct l2_bucket *l2b = NULL; + vm_page_t m; + pt_entry_t *pt; + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + cpu_idcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { + if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) { + /* Cannot remove wired or unmanaged pages now. */ + npv = TAILQ_NEXT(pv, pv_plist); + continue; + } + pmap->pm_stats.resident_count--; + l2b = pmap_get_l2_bucket(pmap, pv->pv_va); + KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); + pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; + m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME); + KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); + *pt = 0; + PTE_SYNC(pt); + npv = TAILQ_NEXT(pv, pv_plist); + pmap_nuke_pv(m, pmap, pv); + if (TAILQ_EMPTY(&m->md.pv_list)) + vm_page_aflag_clear(m, PGA_WRITEABLE); + pmap_free_pv_entry(pv); + pmap_free_l2_bucket(pmap, l2b, 1); + } + rw_wunlock(&pvh_global_lock); + cpu_tlb_flushID(); + cpu_cpwait(); + PMAP_UNLOCK(pmap); +} + + +/*************************************************** + * Low level mapping routines..... 
+ ***************************************************/ + +#ifdef ARM_HAVE_SUPERSECTIONS +/* Map a super section into the KVA. */ + +void +pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags) +{ + pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) | + (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL, + VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); + struct l1_ttable *l1; + vm_offset_t va0, va_end; + + KASSERT(((va | pa) & L1_SUP_OFFSET) == 0, + ("Not a valid super section mapping")); + if (flags & SECTION_CACHE) + pd |= pte_l1_s_cache_mode; + else if (flags & SECTION_PT) + pd |= pte_l1_s_cache_mode_pt; + va0 = va & L1_SUP_FRAME; + va_end = va + L1_SUP_SIZE; + SLIST_FOREACH(l1, &l1_list, l1_link) { + va = va0; + for (; va < va_end; va += L1_S_SIZE) { + l1->l1_kva[L1_IDX(va)] = pd; + PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); + } + } +} +#endif + +/* Map a section into the KVA. */ + +void +pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) +{ + pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, + VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); + struct l1_ttable *l1; + + KASSERT(((va | pa) & L1_S_OFFSET) == 0, + ("Not a valid section mapping")); + if (flags & SECTION_CACHE) + pd |= pte_l1_s_cache_mode; + else if (flags & SECTION_PT) + pd |= pte_l1_s_cache_mode_pt; + SLIST_FOREACH(l1, &l1_list, l1_link) { + l1->l1_kva[L1_IDX(va)] = pd; + PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); + } +} + +/* + * Make a temporary mapping for a physical address. This is only intended + * to be used for panic dumps. + */ +void * +pmap_kenter_temporary(vm_paddr_t pa, int i) +{ + vm_offset_t va; + + va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); + pmap_kenter(va, pa); + return ((void *)crashdumpmap); +} + +/* + * add a wired page to the kva + * note that in order for the mapping to take effect -- you + * should do a invltlb after doing the pmap_kenter... 
+ */ +static PMAP_INLINE void +pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) +{ + struct l2_bucket *l2b; + pt_entry_t *pte; + pt_entry_t opte; + struct pv_entry *pve; + vm_page_t m; + + PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", + (uint32_t) va, (uint32_t) pa)); + + + l2b = pmap_get_l2_bucket(kernel_pmap, va); + if (l2b == NULL) + l2b = pmap_grow_l2_bucket(kernel_pmap, va); + KASSERT(l2b != NULL, ("No L2 Bucket")); + pte = &l2b->l2b_kva[l2pte_index(va)]; + opte = *pte; + PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", + (uint32_t) pte, opte, *pte)); + if (l2pte_valid(opte)) { + pmap_kremove(va); + } else { + if (opte == 0) + l2b->l2b_occupancy++; + } + *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, + VM_PROT_READ | VM_PROT_WRITE); + if (flags & KENTER_CACHE) + *pte |= pte_l2_s_cache_mode; + if (flags & KENTER_USER) + *pte |= L2_S_PROT_U; + PTE_SYNC(pte); + + /* + * A kernel mapping may not be the page's only mapping, so create a PV + * entry to ensure proper caching. + * + * The existence test for the pvzone is used to delay the recording of + * kernel mappings until the VM system is fully initialized. + * + * This expects the physical memory to have a vm_page_array entry. 
+ */ + if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) { + rw_wlock(&pvh_global_lock); + if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { + if ((pve = pmap_get_pv_entry()) == NULL) + panic("pmap_kenter_internal: no pv entries"); + PMAP_LOCK(kernel_pmap); + pmap_enter_pv(m, pve, kernel_pmap, va, + PVF_WRITE | PVF_UNMAN); + pmap_fix_cache(m, kernel_pmap, va); + PMAP_UNLOCK(kernel_pmap); + } else { + m->md.pv_kva = va; + } + rw_wunlock(&pvh_global_lock); + } +} + +void +pmap_kenter(vm_offset_t va, vm_paddr_t pa) +{ + pmap_kenter_internal(va, pa, KENTER_CACHE); +} + +void +pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) +{ + + pmap_kenter_internal(va, pa, 0); +} + +void +pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) +{ + vm_offset_t sva; + + KASSERT((size & PAGE_MASK) == 0, + ("%s: device mapping not page-sized", __func__)); + + sva = va; + while (size != 0) { + pmap_kenter_internal(va, pa, 0); + va += PAGE_SIZE; + pa += PAGE_SIZE; + size -= PAGE_SIZE; + } +} + +void +pmap_kremove_device(vm_offset_t va, vm_size_t size) +{ + vm_offset_t sva; + + KASSERT((size & PAGE_MASK) == 0, + ("%s: device mapping not page-sized", __func__)); + + sva = va; + while (size != 0) { + pmap_kremove(va); + va += PAGE_SIZE; + size -= PAGE_SIZE; + } +} + +void +pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) +{ + + pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); + /* + * Call pmap_fault_fixup now, to make sure we'll have no exception + * at the first use of the new address, or bad things will happen, + * as we use one of these addresses in the exception handlers. 
+ */ + pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); +} + +vm_paddr_t +pmap_kextract(vm_offset_t va) +{ + + return (pmap_extract_locked(kernel_pmap, va)); +} + +/* + * remove a page from the kernel pagetables + */ +void +pmap_kremove(vm_offset_t va) +{ + struct l2_bucket *l2b; + pt_entry_t *pte, opte; + struct pv_entry *pve; + vm_page_t m; + vm_offset_t pa; + + l2b = pmap_get_l2_bucket(kernel_pmap, va); + if (!l2b) + return; + KASSERT(l2b != NULL, ("No L2 Bucket")); + pte = &l2b->l2b_kva[l2pte_index(va)]; + opte = *pte; + if (l2pte_valid(opte)) { + /* pa = vtophs(va) taken from pmap_extract() */ + if ((opte & L2_TYPE_MASK) == L2_TYPE_L) + pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); + else + pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); + /* note: should never have to remove an allocation + * before the pvzone is initialized. + */ + rw_wlock(&pvh_global_lock); + PMAP_LOCK(kernel_pmap); + if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && + (pve = pmap_remove_pv(m, kernel_pmap, va))) + pmap_free_pv_entry(pve); + PMAP_UNLOCK(kernel_pmap); + rw_wunlock(&pvh_global_lock); + va = va & ~PAGE_MASK; + cpu_dcache_wbinv_range(va, PAGE_SIZE); + cpu_l2cache_wbinv_range(va, PAGE_SIZE); + cpu_tlb_flushD_SE(va); + cpu_cpwait(); + *pte = 0; + } +} + + +/* + * Used to map a range of physical addresses into kernel + * virtual address space. + * + * The value passed in '*virt' is a suggested virtual address for + * the mapping. Architectures which can support a direct-mapped + * physical to virtual region can return the appropriate address + * within that region, leaving '*virt' unchanged. Other + * architectures should map the pages starting at '*virt' and + * update '*virt' with the first usable address after the mapped + * region. 
+ */ +vm_offset_t +pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) +{ + vm_offset_t sva = *virt; + vm_offset_t va = sva; + + PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " + "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, + prot)); + + while (start < end) { + pmap_kenter(va, start); + va += PAGE_SIZE; + start += PAGE_SIZE; + } + *virt = va; + return (sva); +} + +static void +pmap_wb_page(vm_page_t m) +{ + struct pv_entry *pv; + + TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) + pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE, + (pv->pv_flags & PVF_WRITE) == 0); +} + +static void +pmap_inv_page(vm_page_t m) +{ + struct pv_entry *pv; + + TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) + pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE); +} +/* + * Add a list of wired pages to the kva + * this routine is only used for temporary + * kernel mappings that do not need to have + * page modification or references recorded. + * Note that old mappings are simply written + * over. The page *must* be wired. + */ +void +pmap_qenter(vm_offset_t va, vm_page_t *m, int count) +{ + int i; + + for (i = 0; i < count; i++) { + pmap_wb_page(m[i]); + pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), + KENTER_CACHE); + va += PAGE_SIZE; + } +} + + +/* + * this routine jerks page mappings from the + * kernel -- it is meant only for temporary mappings. + */ +void +pmap_qremove(vm_offset_t va, int count) +{ + vm_paddr_t pa; + int i; + + for (i = 0; i < count; i++) { + pa = vtophys(va); + if (pa) { + pmap_inv_page(PHYS_TO_VM_PAGE(pa)); + pmap_kremove(va); + } + va += PAGE_SIZE; + } +} + + +/* + * pmap_object_init_pt preloads the ptes for a given object + * into the specified pmap. This eliminates the blast of soft + * faults on process startup and immediately after an mmap. 
+ */ +void +pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, + vm_pindex_t pindex, vm_size_t size) +{ + + VM_OBJECT_ASSERT_WLOCKED(object); + KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, + ("pmap_object_init_pt: non-device object")); +} + + +/* + * pmap_is_prefaultable: + * + * Return whether or not the specified virtual address is elgible + * for prefault. + */ +boolean_t +pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) +{ + pd_entry_t *pde; + pt_entry_t *pte; + + if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) + return (FALSE); + KASSERT(pte != NULL, ("Valid mapping but no pte ?")); + if (*pte == 0) + return (TRUE); + return (FALSE); +} + +/* + * Fetch pointers to the PDE/PTE for the given pmap/VA pair. + * Returns TRUE if the mapping exists, else FALSE. + * + * NOTE: This function is only used by a couple of arm-specific modules. + * It is not safe to take any pmap locks here, since we could be right + * in the middle of debugging the pmap anyway... + * + * It is possible for this routine to return FALSE even though a valid + * mapping does exist. This is because we don't lock, so the metadata + * state may be inconsistent. + * + * NOTE: We can return a NULL *ptp in the case where the L1 pde is + * a "section" mapping. 
+ */ +boolean_t +pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) +{ + struct l2_dtable *l2; + pd_entry_t *pl1pd, l1pd; + pt_entry_t *ptep; + u_short l1idx; + + if (pm->pm_l1 == NULL) + return (FALSE); + + l1idx = L1_IDX(va); + *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; + l1pd = *pl1pd; + + if (l1pte_section_p(l1pd)) { + *ptp = NULL; + return (TRUE); + } + + if (pm->pm_l2 == NULL) + return (FALSE); + + l2 = pm->pm_l2[L2_IDX(l1idx)]; + + if (l2 == NULL || + (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { + return (FALSE); + } + + *ptp = &ptep[l2pte_index(va)]; + return (TRUE); +} + +/* + * Routine: pmap_remove_all + * Function: + * Removes this physical page from + * all physical maps in which it resides. + * Reflects back modify bits to the pager. + * + * Notes: + * Original versions of this routine were very + * inefficient because they iteratively called + * pmap_remove (slow...) + */ +void +pmap_remove_all(vm_page_t m) +{ + pv_entry_t pv; + pt_entry_t *ptep; + struct l2_bucket *l2b; + boolean_t flush = FALSE; + pmap_t curpm; + int flags = 0; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_remove_all: page %p is not managed", m)); + if (TAILQ_EMPTY(&m->md.pv_list)) + return; + rw_wlock(&pvh_global_lock); + + /* + * XXX This call shouldn't exist. Iterating over the PV list twice, + * once in pmap_clearbit() and again below, is both unnecessary and + * inefficient. The below code should itself write back the cache + * entry before it destroys the mapping. 
+ */ + pmap_clearbit(m, PVF_WRITE); + curpm = vmspace_pmap(curproc->p_vmspace); + while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { + if (flush == FALSE && (pv->pv_pmap == curpm || + pv->pv_pmap == kernel_pmap)) + flush = TRUE; + + PMAP_LOCK(pv->pv_pmap); + /* + * Cached contents were written-back in pmap_clearbit(), + * but we still have to invalidate the cache entry to make + * sure stale data are not retrieved when another page will be + * mapped under this virtual address. + */ + if (pmap_is_current(pv->pv_pmap)) { + cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); + if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) + cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); + } + + if (pv->pv_flags & PVF_UNMAN) { + /* remove the pv entry, but do not remove the mapping + * and remember this is a kernel mapped page + */ + m->md.pv_kva = pv->pv_va; + } else { + /* remove the mapping and pv entry */ + l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); + KASSERT(l2b != NULL, ("No l2 bucket")); + ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; + *ptep = 0; + PTE_SYNC_CURRENT(pv->pv_pmap, ptep); + pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); + pv->pv_pmap->pm_stats.resident_count--; + flags |= pv->pv_flags; + } + pmap_nuke_pv(m, pv->pv_pmap, pv); + PMAP_UNLOCK(pv->pv_pmap); + pmap_free_pv_entry(pv); + } + + if (flush) { + if (PV_BEEN_EXECD(flags)) + pmap_tlb_flushID(curpm); + else + pmap_tlb_flushD(curpm); + } + vm_page_aflag_clear(m, PGA_WRITEABLE); + rw_wunlock(&pvh_global_lock); +} + + +/* + * Set the physical protection on the + * specified range of this map as requested. 
+ */ +void +pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep, pte; + vm_offset_t next_bucket; + u_int flags; + int flush; + + CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", + pm, sva, eva, prot); + + if ((prot & VM_PROT_READ) == 0) { + pmap_remove(pm, sva, eva); + return; + } + + if (prot & VM_PROT_WRITE) { + /* + * If this is a read->write transition, just ignore it and let + * vm_fault() take care of it later. + */ + return; + } + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + + /* + * OK, at this point, we know we're doing write-protect operation. + * If the pmap is active, write-back the range. + */ + pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); + + flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; + flags = 0; + + while (sva < eva) { + next_bucket = L2_NEXT_BUCKET(sva); + if (next_bucket > eva) + next_bucket = eva; + + l2b = pmap_get_l2_bucket(pm, sva); + if (l2b == NULL) { + sva = next_bucket; + continue; + } + + ptep = &l2b->l2b_kva[l2pte_index(sva)]; + + while (sva < next_bucket) { + if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { + struct vm_page *pg; + u_int f; + + pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); + pte &= ~L2_S_PROT_W; + *ptep = pte; + PTE_SYNC(ptep); + + if (!(pg->oflags & VPO_UNMANAGED)) { + f = pmap_modify_pv(pg, pm, sva, + PVF_WRITE, 0); + if (f & PVF_WRITE) + vm_page_dirty(pg); + } else + f = 0; + + if (flush >= 0) { + flush++; + flags |= f; + } else + if (PV_BEEN_EXECD(f)) + pmap_tlb_flushID_SE(pm, sva); + else + if (PV_BEEN_REFD(f)) + pmap_tlb_flushD_SE(pm, sva); + } + + sva += PAGE_SIZE; + ptep++; + } + } + + + if (flush) { + if (PV_BEEN_EXECD(flags)) + pmap_tlb_flushID(pm); + else + if (PV_BEEN_REFD(flags)) + pmap_tlb_flushD(pm); + } + rw_wunlock(&pvh_global_lock); + + PMAP_UNLOCK(pm); +} + + +/* + * Insert the given physical page (p) at + * the specified virtual address (v) in the + * target physical map with the protection 
requested. + * + * If specified, the page will be wired down, meaning + * that the related pte can not be reclaimed. + * + * NB: This is the only routine which MAY NOT lazy-evaluate + * or lose information. That is, this routine must actually + * insert this page into the given map NOW. + */ + +int +pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, + u_int flags, int8_t psind __unused) +{ + int rv; + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + rv = pmap_enter_locked(pmap, va, m, prot, flags); + rw_wunlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); + return (rv); +} + +/* + * The pvh global and pmap locks must be held. + */ +static int +pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, + u_int flags) +{ + struct l2_bucket *l2b = NULL; + struct vm_page *opg; + struct pv_entry *pve = NULL; + pt_entry_t *ptep, npte, opte; + u_int nflags; + u_int oflags; + vm_paddr_t pa; + + PMAP_ASSERT_LOCKED(pmap); + rw_assert(&pvh_global_lock, RA_WLOCKED); + if (va == vector_page) { + pa = systempage.pv_pa; + m = NULL; + } else { + if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) + VM_OBJECT_ASSERT_LOCKED(m->object); + pa = VM_PAGE_TO_PHYS(m); + } + nflags = 0; + if (prot & VM_PROT_WRITE) + nflags |= PVF_WRITE; + if (prot & VM_PROT_EXECUTE) + nflags |= PVF_EXEC; + if ((flags & PMAP_ENTER_WIRED) != 0) + nflags |= PVF_WIRED; + PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " + "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); + + if (pmap == kernel_pmap) { + l2b = pmap_get_l2_bucket(pmap, va); + if (l2b == NULL) + l2b = pmap_grow_l2_bucket(pmap, va); + } else { +do_l2b_alloc: + l2b = pmap_alloc_l2_bucket(pmap, va); + if (l2b == NULL) { + if ((flags & PMAP_ENTER_NOSLEEP) == 0) { + PMAP_UNLOCK(pmap); + rw_wunlock(&pvh_global_lock); + VM_WAIT; + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + goto do_l2b_alloc; + } + return (KERN_RESOURCE_SHORTAGE); + } + } + + ptep = 
&l2b->l2b_kva[l2pte_index(va)]; + + opte = *ptep; + npte = pa; + oflags = 0; + if (opte) { + /* + * There is already a mapping at this address. + * If the physical address is different, lookup the + * vm_page. + */ + if (l2pte_pa(opte) != pa) + opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); + else + opg = m; + } else + opg = NULL; + + if ((prot & (VM_PROT_ALL)) || + (!m || m->md.pvh_attrs & PVF_REF)) { + /* + * - The access type indicates that we don't need + * to do referenced emulation. + * OR + * - The physical page has already been referenced + * so no need to re-do referenced emulation here. + */ + npte |= L2_S_PROTO; + + nflags |= PVF_REF; + + if (m && ((prot & VM_PROT_WRITE) != 0 || + (m->md.pvh_attrs & PVF_MOD))) { + /* + * This is a writable mapping, and the + * page's mod state indicates it has + * already been modified. Make it + * writable from the outset. + */ + nflags |= PVF_MOD; + if (!(m->md.pvh_attrs & PVF_MOD)) + vm_page_dirty(m); + } + if (m && opte) + vm_page_aflag_set(m, PGA_REFERENCED); + } else { + /* + * Need to do page referenced emulation. + */ + npte |= L2_TYPE_INV; + } + + if (prot & VM_PROT_WRITE) { + npte |= L2_S_PROT_W; + if (m != NULL && + (m->oflags & VPO_UNMANAGED) == 0) + vm_page_aflag_set(m, PGA_WRITEABLE); + } + if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) + npte |= pte_l2_s_cache_mode; + if (m && m == opg) { + /* + * We're changing the attrs of an existing mapping. + */ + oflags = pmap_modify_pv(m, pmap, va, + PVF_WRITE | PVF_EXEC | PVF_WIRED | + PVF_MOD | PVF_REF, nflags); + + /* + * We may need to flush the cache if we're + * doing rw-ro... + */ + if (pmap_is_current(pmap) && + (oflags & PVF_NC) == 0 && + (opte & L2_S_PROT_W) != 0 && + (prot & VM_PROT_WRITE) == 0 && + (opte & L2_TYPE_MASK) != L2_TYPE_INV) { + cpu_dcache_wb_range(va, PAGE_SIZE); + cpu_l2cache_wb_range(va, PAGE_SIZE); + } + } else { + /* + * New mapping, or changing the backing page + * of an existing mapping. 
+ */ + if (opg) { + /* + * Replacing an existing mapping with a new one. + * It is part of our managed memory so we + * must remove it from the PV list + */ + if ((pve = pmap_remove_pv(opg, pmap, va))) { + + /* note for patch: the oflags/invalidation was moved + * because PG_FICTITIOUS pages could free the pve + */ + oflags = pve->pv_flags; + /* + * If the old mapping was valid (ref/mod + * emulation creates 'invalid' mappings + * initially) then make sure to frob + * the cache. + */ + if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { + if (PV_BEEN_EXECD(oflags)) { + pmap_idcache_wbinv_range(pmap, va, + PAGE_SIZE); + } else + if (PV_BEEN_REFD(oflags)) { + pmap_dcache_wb_range(pmap, va, + PAGE_SIZE, TRUE, + (oflags & PVF_WRITE) == 0); + } + } + + /* free/allocate a pv_entry for UNMANAGED pages if + * this physical page is not/is already mapped. + */ + + if (m && (m->oflags & VPO_UNMANAGED) && + !m->md.pv_kva && + TAILQ_EMPTY(&m->md.pv_list)) { + pmap_free_pv_entry(pve); + pve = NULL; + } + } else if (m && + (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || + !TAILQ_EMPTY(&m->md.pv_list))) + pve = pmap_get_pv_entry(); + } else if (m && + (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || + !TAILQ_EMPTY(&m->md.pv_list))) + pve = pmap_get_pv_entry(); + + if (m) { + if ((m->oflags & VPO_UNMANAGED)) { + if (!TAILQ_EMPTY(&m->md.pv_list) || + m->md.pv_kva) { + KASSERT(pve != NULL, ("No pv")); + nflags |= PVF_UNMAN; + pmap_enter_pv(m, pve, pmap, va, nflags); + } else + m->md.pv_kva = va; + } else { + KASSERT(va < kmi.clean_sva || + va >= kmi.clean_eva, + ("pmap_enter: managed mapping within the clean submap")); + KASSERT(pve != NULL, ("No pv")); + pmap_enter_pv(m, pve, pmap, va, nflags); + } + } + } + /* + * Make sure userland mappings get the right permissions + */ + if (pmap != kernel_pmap && va != vector_page) { + npte |= L2_S_PROT_U; + } + + /* + * Keep the stats up to date + */ + if (opte == 0) { + l2b->l2b_occupancy++; + pmap->pm_stats.resident_count++; + } + + /* + * 
If this is just a wiring change, the two PTEs will be + * identical, so there's no need to update the page table. + */ + if (npte != opte) { + boolean_t is_cached = pmap_is_current(pmap); + + *ptep = npte; + if (is_cached) { + /* + * We only need to frob the cache/tlb if this pmap + * is current + */ + PTE_SYNC(ptep); + if (L1_IDX(va) != L1_IDX(vector_page) && + l2pte_valid(npte)) { + /* + * This mapping is likely to be accessed as + * soon as we return to userland. Fix up the + * L1 entry to avoid taking another + * page/domain fault. + */ + pd_entry_t *pl1pd, l1pd; + + pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; + l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | + L1_C_PROTO; + if (*pl1pd != l1pd) { + *pl1pd = l1pd; + PTE_SYNC(pl1pd); + } + } + } + + if (PV_BEEN_EXECD(oflags)) + pmap_tlb_flushID_SE(pmap, va); + else if (PV_BEEN_REFD(oflags)) + pmap_tlb_flushD_SE(pmap, va); + + + if (m) + pmap_fix_cache(m, pmap, va); + } + return (KERN_SUCCESS); +} + +/* + * Maps a sequence of resident pages belonging to the same object. + * The sequence begins with the given page m_start. This page is + * mapped at the given virtual address start. Each subsequent page is + * mapped at a virtual address that is offset from start by the same + * amount as the page is offset from m_start within the object. The + * last page in the sequence is the page with the largest offset from + * m_start that can be mapped at a virtual address less than the given + * virtual address end. Not every virtual page between start and end + * is mapped; only those for which a resident page exists with the + * corresponding offset from m_start are mapped. 
+ */ +void +pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, + vm_page_t m_start, vm_prot_t prot) +{ + vm_page_t m; + vm_pindex_t diff, psize; + + VM_OBJECT_ASSERT_LOCKED(m_start->object); + + psize = atop(end - start); + m = m_start; + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { + pmap_enter_locked(pmap, start + ptoa(diff), m, prot & + (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP); + m = TAILQ_NEXT(m, listq); + } + rw_wunlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + +/* + * this code makes some *MAJOR* assumptions: + * 1. Current pmap & pmap exists. + * 2. Not wired. + * 3. Read access. + * 4. No page table pages. + * but is *MUCH* faster than pmap_enter... + */ + +void +pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) +{ + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), + PMAP_ENTER_NOSLEEP); + rw_wunlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + +/* + * Clear the wired attribute from the mappings for the specified range of + * addresses in the given pmap. Every valid mapping within that range + * must have the wired attribute set. In contrast, invalid mappings + * cannot have the wired attribute set, so they are ignored. + * + * XXX Wired mappings of unmanaged pages cannot be counted by this pmap + * implementation. 
+ */ +void +pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep, pte; + pv_entry_t pv; + vm_offset_t next_bucket; + vm_page_t m; + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pmap); + while (sva < eva) { + next_bucket = L2_NEXT_BUCKET(sva); + if (next_bucket > eva) + next_bucket = eva; + l2b = pmap_get_l2_bucket(pmap, sva); + if (l2b == NULL) { + sva = next_bucket; + continue; + } + for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket; + sva += PAGE_SIZE, ptep++) { + if ((pte = *ptep) == 0 || + (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL || + (m->oflags & VPO_UNMANAGED) != 0) + continue; + pv = pmap_find_pv(m, pmap, sva); + if ((pv->pv_flags & PVF_WIRED) == 0) + panic("pmap_unwire: pv %p isn't wired", pv); + pv->pv_flags &= ~PVF_WIRED; + pmap->pm_stats.wired_count--; + } + } + rw_wunlock(&pvh_global_lock); + PMAP_UNLOCK(pmap); +} + + +/* + * Copy the range specified by src_addr/len + * from the source map to the range dst_addr/len + * in the destination map. + * + * This routine is only advisory and need not do anything. + */ +void +pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, + vm_size_t len, vm_offset_t src_addr) +{ +} + + +/* + * Routine: pmap_extract + * Function: + * Extract the physical page address associated + * with the given map/virtual_address pair. + */ +vm_paddr_t +pmap_extract(pmap_t pmap, vm_offset_t va) +{ + vm_paddr_t pa; + + PMAP_LOCK(pmap); + pa = pmap_extract_locked(pmap, va); + PMAP_UNLOCK(pmap); + return (pa); +} + +static vm_paddr_t +pmap_extract_locked(pmap_t pmap, vm_offset_t va) +{ + struct l2_dtable *l2; + pd_entry_t l1pd; + pt_entry_t *ptep, pte; + vm_paddr_t pa; + u_int l1idx; + + if (pmap != kernel_pmap) + PMAP_ASSERT_LOCKED(pmap); + l1idx = L1_IDX(va); + l1pd = pmap->pm_l1->l1_kva[l1idx]; + if (l1pte_section_p(l1pd)) { + /* + * These should only happen for the kernel pmap. 
+ */ + KASSERT(pmap == kernel_pmap, ("unexpected section")); + /* XXX: what to do about the bits > 32 ? */ + if (l1pd & L1_S_SUPERSEC) + pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); + else + pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); + } else { + /* + * Note that we can't rely on the validity of the L1 + * descriptor as an indication that a mapping exists. + * We have to look it up in the L2 dtable. + */ + l2 = pmap->pm_l2[L2_IDX(l1idx)]; + if (l2 == NULL || + (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) + return (0); + pte = ptep[l2pte_index(va)]; + if (pte == 0) + return (0); + if ((pte & L2_TYPE_MASK) == L2_TYPE_L) + pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); + else + pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); + } + return (pa); +} + +/* + * Atomically extract and hold the physical page with the given + * pmap and virtual address pair if that mapping permits the given + * protection. + * + */ +vm_page_t +pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) +{ + struct l2_dtable *l2; + pd_entry_t l1pd; + pt_entry_t *ptep, pte; + vm_paddr_t pa, paddr; + vm_page_t m = NULL; + u_int l1idx; + l1idx = L1_IDX(va); + paddr = 0; + + PMAP_LOCK(pmap); +retry: + l1pd = pmap->pm_l1->l1_kva[l1idx]; + if (l1pte_section_p(l1pd)) { + /* + * These should only happen for kernel_pmap + */ + KASSERT(pmap == kernel_pmap, ("huh")); + /* XXX: what to do about the bits > 32 ? */ + if (l1pd & L1_S_SUPERSEC) + pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); + else + pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); + if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) + goto retry; + if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { + m = PHYS_TO_VM_PAGE(pa); + vm_page_hold(m); + } + + } else { + /* + * Note that we can't rely on the validity of the L1 + * descriptor as an indication that a mapping exists. + * We have to look it up in the L2 dtable. 
+ */ + l2 = pmap->pm_l2[L2_IDX(l1idx)]; + + if (l2 == NULL || + (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { + PMAP_UNLOCK(pmap); + return (NULL); + } + + ptep = &ptep[l2pte_index(va)]; + pte = *ptep; + + if (pte == 0) { + PMAP_UNLOCK(pmap); + return (NULL); + } + if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { + if ((pte & L2_TYPE_MASK) == L2_TYPE_L) + pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); + else + pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); + if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) + goto retry; + m = PHYS_TO_VM_PAGE(pa); + vm_page_hold(m); + } + } + + PMAP_UNLOCK(pmap); + PA_UNLOCK_COND(paddr); + return (m); +} + +vm_paddr_t +pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) +{ + struct l2_dtable *l2; + pd_entry_t l1pd; + pt_entry_t *ptep, pte; + vm_paddr_t pa; + u_int l1idx; + + l1idx = L1_IDX(va); + l1pd = kernel_pmap->pm_l1->l1_kva[l1idx]; + if (l1pte_section_p(l1pd)) { + if (l1pd & L1_S_SUPERSEC) + pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); + else + pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); + pte = L2_S_PROTO | pa | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); + } else { + l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]; + if (l2 == NULL || + (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { + pte = 0; + pa = 0; + goto out; + } + pte = ptep[l2pte_index(va)]; + if (pte == 0) { + pa = 0; + goto out; + } + if ((pte & L2_TYPE_MASK) == L2_TYPE_L) + pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); + else + pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); + } +out: + if (pte2p != NULL) + *pte2p = pte; + return (pa); +} + +/* + * Initialize a preallocated and zeroed pmap structure, + * such as one in a vmspace structure. 
+ */ + +int +pmap_pinit(pmap_t pmap) +{ + PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); + + pmap_alloc_l1(pmap); + bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); + + CPU_ZERO(&pmap->pm_active); + + TAILQ_INIT(&pmap->pm_pvlist); + bzero(&pmap->pm_stats, sizeof pmap->pm_stats); + pmap->pm_stats.resident_count = 1; + if (vector_page < KERNBASE) { + pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), + VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0); + } + return (1); +} + + +/*************************************************** + * page management routines. + ***************************************************/ + + +static void +pmap_free_pv_entry(pv_entry_t pv) +{ + pv_entry_count--; + uma_zfree(pvzone, pv); +} + + +/* + * get a new pv_entry, allocating a block from the system + * when needed. + * the memory allocation is performed bypassing the malloc code + * because of the possibility of allocations at interrupt time. + */ +static pv_entry_t +pmap_get_pv_entry(void) +{ + pv_entry_t ret_value; + + pv_entry_count++; + if (pv_entry_count > pv_entry_high_water) + pagedaemon_wakeup(); + ret_value = uma_zalloc(pvzone, M_NOWAIT); + return ret_value; +} + +/* + * Remove the given range of addresses from the specified map. + * + * It is assumed that the start and end are properly + * rounded to the page size. + */ +#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 +void +pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) +{ + struct l2_bucket *l2b; + vm_offset_t next_bucket; + pt_entry_t *ptep; + u_int total; + u_int mappings, is_exec, is_refd; + int flushall = 0; + + + /* + * we lock in the pmap => pv_head direction + */ + + rw_wlock(&pvh_global_lock); + PMAP_LOCK(pm); + total = 0; + while (sva < eva) { + /* + * Do one L2 bucket's worth at a time. 
+ */ + next_bucket = L2_NEXT_BUCKET(sva); + if (next_bucket > eva) + next_bucket = eva; + + l2b = pmap_get_l2_bucket(pm, sva); + if (l2b == NULL) { + sva = next_bucket; + continue; + } + + ptep = &l2b->l2b_kva[l2pte_index(sva)]; + mappings = 0; + + while (sva < next_bucket) { + struct vm_page *pg; + pt_entry_t pte; + vm_paddr_t pa; + + pte = *ptep; + + if (pte == 0) { + /* + * Nothing here, move along + */ + sva += PAGE_SIZE; + ptep++; + continue; + } + + pm->pm_stats.resident_count--; + pa = l2pte_pa(pte); + is_exec = 0; + is_refd = 1; + + /* + * Update flags. In a number of circumstances, + * we could cluster a lot of these and do a + * number of sequential pages in one go. + */ + if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { + struct pv_entry *pve; + + pve = pmap_remove_pv(pg, pm, sva); + if (pve) { + is_exec = PV_BEEN_EXECD(pve->pv_flags); + is_refd = PV_BEEN_REFD(pve->pv_flags); + pmap_free_pv_entry(pve); + } + } + + if (l2pte_valid(pte) && pmap_is_current(pm)) { + if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) { + total++; + if (is_exec) { + cpu_idcache_wbinv_range(sva, + PAGE_SIZE); + cpu_l2cache_wbinv_range(sva, + PAGE_SIZE); + cpu_tlb_flushID_SE(sva); + } else if (is_refd) { + cpu_dcache_wbinv_range(sva, + PAGE_SIZE); + cpu_l2cache_wbinv_range(sva, + PAGE_SIZE); + cpu_tlb_flushD_SE(sva); + } + } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) { + /* flushall will also only get set for + * for a current pmap + */ + cpu_idcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + flushall = 1; + total++; + } + } + *ptep = 0; + PTE_SYNC(ptep); + + sva += PAGE_SIZE; + ptep++; + mappings++; + } + + pmap_free_l2_bucket(pm, l2b, mappings); + } + + rw_wunlock(&pvh_global_lock); + if (flushall) + cpu_tlb_flushID(); + PMAP_UNLOCK(pm); +} + +/* + * pmap_zero_page() + * + * Zero a given physical page by mapping it at a page hook point. 
+ * In doing the zero page op, the page we zero is mapped cachable, as with + * StrongARM accesses to non-cached pages are non-burst making writing + * _any_ bulk data very slow. + */ +#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) +void +pmap_zero_page_generic(vm_paddr_t phys, int off, int size) +{ + + if (_arm_bzero && size >= _min_bzero_size && + _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) + return; + + mtx_lock(&cmtx); + /* + * Hook in the page, zero it, invalidate the TLB as needed. + * + * Note the temporary zero-page mapping must be a non-cached page in + * order to work without corruption when write-allocate is enabled. + */ + *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + if (off || size != PAGE_SIZE) + bzero((void *)(cdstp + off), size); + else + bzero_page(cdstp); + + mtx_unlock(&cmtx); +} +#endif /* ARM_MMU_GENERIC != 0 */ + +#if ARM_MMU_XSCALE == 1 +void +pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) +{ + + if (_arm_bzero && size >= _min_bzero_size && + _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) + return; + + mtx_lock(&cmtx); + /* + * Hook in the page, zero it, and purge the cache for that + * zeroed page. Invalidate the TLB as needed. + */ + *cdst_pte = L2_S_PROTO | phys | + L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + if (off || size != PAGE_SIZE) + bzero((void *)(cdstp + off), size); + else + bzero_page(cdstp); + mtx_unlock(&cmtx); + xscale_cache_clean_minidata(); +} + +/* + * Change the PTEs for the specified kernel mappings such that they + * will use the mini data cache instead of the main data cache. 
+ */ +void +pmap_use_minicache(vm_offset_t va, vm_size_t size) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep, *sptep, pte; + vm_offset_t next_bucket, eva; + +#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3) + if (xscale_use_minidata == 0) + return; +#endif + + eva = va + size; + + while (va < eva) { + next_bucket = L2_NEXT_BUCKET(va); + if (next_bucket > eva) + next_bucket = eva; + + l2b = pmap_get_l2_bucket(kernel_pmap, va); + + sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; + + while (va < next_bucket) { + pte = *ptep; + if (!l2pte_minidata(pte)) { + cpu_dcache_wbinv_range(va, PAGE_SIZE); + cpu_tlb_flushD_SE(va); + *ptep = pte & ~L2_B; + } + ptep++; + va += PAGE_SIZE; + } + PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); + } + cpu_cpwait(); +} +#endif /* ARM_MMU_XSCALE == 1 */ + +/* + * pmap_zero_page zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. + */ +void +pmap_zero_page(vm_page_t m) +{ + pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); +} + + +/* + * pmap_zero_page_area zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. + * + * off and size may not cover an area beyond a single hardware page. + */ +void +pmap_zero_page_area(vm_page_t m, int off, int size) +{ + + pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); +} + + +/* + * pmap_zero_page_idle zeros the specified hardware page by mapping + * the page into KVM and using bzero to clear its contents. This + * is intended to be called from the vm_pagezero process only and + * outside of Giant. + */ +void +pmap_zero_page_idle(vm_page_t m) +{ + + pmap_zero_page(m); +} + +#if 0 +/* + * pmap_clean_page() + * + * This is a local function used to work out the best strategy to clean + * a single page referenced by its entry in the PV table. It should be used by + * pmap_copy_page, pmap_zero page and maybe some others later on. 
+ * + * Its policy is effectively: + * o If there are no mappings, we don't bother doing anything with the cache. + * o If there is one mapping, we clean just that page. + * o If there are multiple mappings, we clean the entire cache. + * + * So that some functions can be further optimised, it returns 0 if it didn't + * clean the entire cache, or 1 if it did. + * + * XXX One bug in this routine is that if the pv_entry has a single page + * mapped at 0x00000000 a whole cache clean will be performed rather than + * just the 1 page. Since this should not occur in everyday use and if it does + * it will just result in not the most efficient clean for the page. + * + * We don't yet use this function but may want to. + */ +static int +pmap_clean_page(struct pv_entry *pv, boolean_t is_src) +{ + pmap_t pm, pm_to_clean = NULL; + struct pv_entry *npv; + u_int cache_needs_cleaning = 0; + u_int flags = 0; + vm_offset_t page_to_clean = 0; + + if (pv == NULL) { + /* nothing mapped in so nothing to flush */ + return (0); + } + + /* + * Since we flush the cache each time we change to a different + * user vmspace, we only need to flush the page if it is in the + * current pmap. + */ + if (curthread) + pm = vmspace_pmap(curproc->p_vmspace); + else + pm = kernel_pmap; + + for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { + if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) { + flags |= npv->pv_flags; + /* + * The page is mapped non-cacheable in + * this map. No need to flush the cache. 
+ */ + if (npv->pv_flags & PVF_NC) { +#ifdef DIAGNOSTIC + if (cache_needs_cleaning) + panic("pmap_clean_page: " + "cache inconsistency"); +#endif + break; + } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) + continue; + if (cache_needs_cleaning) { + page_to_clean = 0; + break; + } else { + page_to_clean = npv->pv_va; + pm_to_clean = npv->pv_pmap; + } + cache_needs_cleaning = 1; + } + } + if (page_to_clean) { + if (PV_BEEN_EXECD(flags)) + pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, + PAGE_SIZE); + else + pmap_dcache_wb_range(pm_to_clean, page_to_clean, + PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); + } else if (cache_needs_cleaning) { + if (PV_BEEN_EXECD(flags)) + pmap_idcache_wbinv_all(pm); + else + pmap_dcache_wbinv_all(pm); + return (1); + } + return (0); +} +#endif + +/* + * pmap_copy_page copies the specified (machine independent) + * page by mapping the page into virtual memory and using + * bcopy to copy the page, one machine dependent page at a + * time. + */ + +/* + * pmap_copy_page() + * + * Copy one physical page into another, by mapping the pages into + * hook points. The same comment regarding cachability as in + * pmap_zero_page also applies here. + */ +#if ARM_MMU_GENERIC != 0 || defined (CPU_XSCALE_CORE3) +void +pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) +{ +#if 0 + struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); +#endif + + /* + * Clean the source page. Hold the source page's lock for + * the duration of the copy so that no other mappings can + * be created while we have a potentially aliased mapping. + */ +#if 0 + /* + * XXX: Not needed while we call cpu_dcache_wbinv_all() in + * pmap_copy_page(). + */ + (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); +#endif + /* + * Map the pages into the page hook points, copy them, and purge + * the cache for the appropriate page. Invalidate the TLB + * as required. 
+ */ + mtx_lock(&cmtx); + *csrc_pte = L2_S_PROTO | src | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; + PTE_SYNC(csrc_pte); + *cdst_pte = L2_S_PROTO | dst | + L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(csrcp); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + bcopy_page(csrcp, cdstp); + mtx_unlock(&cmtx); + cpu_dcache_inv_range(csrcp, PAGE_SIZE); + cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); + cpu_l2cache_inv_range(csrcp, PAGE_SIZE); + cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE); +} + +void +pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, + vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) +{ + + mtx_lock(&cmtx); + *csrc_pte = L2_S_PROTO | a_phys | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; + PTE_SYNC(csrc_pte); + *cdst_pte = L2_S_PROTO | b_phys | + L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(csrcp); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); + mtx_unlock(&cmtx); + cpu_dcache_inv_range(csrcp + a_offs, cnt); + cpu_dcache_wbinv_range(cdstp + b_offs, cnt); + cpu_l2cache_inv_range(csrcp + a_offs, cnt); + cpu_l2cache_wbinv_range(cdstp + b_offs, cnt); +} +#endif /* ARM_MMU_GENERIC != 0 */ + +#if ARM_MMU_XSCALE == 1 +void +pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) +{ +#if 0 + /* XXX: Only needed for pmap_clean_page(), which is commented out. */ + struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); +#endif + + /* + * Clean the source page. Hold the source page's lock for + * the duration of the copy so that no other mappings can + * be created while we have a potentially aliased mapping. + */ +#if 0 + /* + * XXX: Not needed while we call cpu_dcache_wbinv_all() in + * pmap_copy_page(). 
+ */ + (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); +#endif + /* + * Map the pages into the page hook points, copy them, and purge + * the cache for the appropriate page. Invalidate the TLB + * as required. + */ + mtx_lock(&cmtx); + *csrc_pte = L2_S_PROTO | src | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ + PTE_SYNC(csrc_pte); + *cdst_pte = L2_S_PROTO | dst | + L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(csrcp); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + bcopy_page(csrcp, cdstp); + mtx_unlock(&cmtx); + xscale_cache_clean_minidata(); +} + +void +pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, + vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) +{ + + mtx_lock(&cmtx); + *csrc_pte = L2_S_PROTO | a_phys | + L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); + PTE_SYNC(csrc_pte); + *cdst_pte = L2_S_PROTO | b_phys | + L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | + L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); + PTE_SYNC(cdst_pte); + cpu_tlb_flushD_SE(csrcp); + cpu_tlb_flushD_SE(cdstp); + cpu_cpwait(); + bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); + mtx_unlock(&cmtx); + xscale_cache_clean_minidata(); +} +#endif /* ARM_MMU_XSCALE == 1 */ + +void +pmap_copy_page(vm_page_t src, vm_page_t dst) +{ + + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && + _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), + (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0) + return; + pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); +} + +/* + * We have code to do unmapped I/O. However, it isn't quite right and + * causes un-page-aligned I/O to devices to fail (most notably newfs + * or fsck). We give up a little performance to not allow unmapped I/O + * to gain stability. 
+ */ +int unmapped_buf_allowed = 0; + +void +pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], + vm_offset_t b_offset, int xfersize) +{ + vm_page_t a_pg, b_pg; + vm_offset_t a_pg_offset, b_pg_offset; + int cnt; + + cpu_dcache_wbinv_all(); + cpu_l2cache_wbinv_all(); + while (xfersize > 0) { + a_pg = ma[a_offset >> PAGE_SHIFT]; + a_pg_offset = a_offset & PAGE_MASK; + cnt = min(xfersize, PAGE_SIZE - a_pg_offset); + b_pg = mb[b_offset >> PAGE_SHIFT]; + b_pg_offset = b_offset & PAGE_MASK; + cnt = min(cnt, PAGE_SIZE - b_pg_offset); + pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset, + VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt); + xfersize -= cnt; + a_offset += cnt; + b_offset += cnt; + } +} + +vm_offset_t +pmap_quick_enter_page(vm_page_t m) +{ + /* + * Don't bother with a PCPU pageframe, since we don't support + * SMP for anything pre-armv7. Use pmap_kenter() to ensure + * caching is handled correctly for multiple mappings of the + * same physical page. + */ + + mtx_assert(&qmap_mtx, MA_NOTOWNED); + mtx_lock(&qmap_mtx); + + pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m)); + + return (qmap_addr); +} + +void +pmap_quick_remove_page(vm_offset_t addr) +{ + KASSERT(addr == qmap_addr, + ("pmap_quick_remove_page: invalid address")); + mtx_assert(&qmap_mtx, MA_OWNED); + pmap_kremove(addr); + mtx_unlock(&qmap_mtx); +} + +/* + * this routine returns true if a physical page resides + * in the given pmap. 
+ */ +boolean_t +pmap_page_exists_quick(pmap_t pmap, vm_page_t m) +{ + pv_entry_t pv; + int loops = 0; + boolean_t rv; + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_page_exists_quick: page %p is not managed", m)); + rv = FALSE; + rw_wlock(&pvh_global_lock); + TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { + if (pv->pv_pmap == pmap) { + rv = TRUE; + break; + } + loops++; + if (loops >= 16) + break; + } + rw_wunlock(&pvh_global_lock); + return (rv); +} + +/* + * pmap_page_wired_mappings: + * + * Return the number of managed mappings to the given physical page + * that are wired. + */ +int +pmap_page_wired_mappings(vm_page_t m) +{ + pv_entry_t pv; + int count; + + count = 0; + if ((m->oflags & VPO_UNMANAGED) != 0) + return (count); + rw_wlock(&pvh_global_lock); + TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) + if ((pv->pv_flags & PVF_WIRED) != 0) + count++; + rw_wunlock(&pvh_global_lock); + return (count); +} + +/* + * This function is advisory. + */ +void +pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) +{ +} + +/* + * pmap_ts_referenced: + * + * Return the count of reference bits for a page, clearing all of them. + */ +int +pmap_ts_referenced(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_ts_referenced: page %p is not managed", m)); + return (pmap_clearbit(m, PVF_REF)); +} + + +boolean_t +pmap_is_modified(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_is_modified: page %p is not managed", m)); + if (m->md.pvh_attrs & PVF_MOD) + return (TRUE); + + return(FALSE); +} + + +/* + * Clear the modify bits on the specified physical page. + */ +void +pmap_clear_modify(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_clear_modify: page %p is not managed", m)); + VM_OBJECT_ASSERT_WLOCKED(m->object); + KASSERT(!vm_page_xbusied(m), + ("pmap_clear_modify: page %p is exclusive busied", m)); + + /* + * If the page is not PGA_WRITEABLE, then no mappings can be modified. 
+ * If the object containing the page is locked and the page is not + * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. + */ + if ((m->aflags & PGA_WRITEABLE) == 0) + return; + if (m->md.pvh_attrs & PVF_MOD) + pmap_clearbit(m, PVF_MOD); +} + + +/* + * pmap_is_referenced: + * + * Return whether or not the specified physical page was referenced + * in any physical maps. + */ +boolean_t +pmap_is_referenced(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_is_referenced: page %p is not managed", m)); + return ((m->md.pvh_attrs & PVF_REF) != 0); +} + + +/* + * Clear the write and modified bits in each of the given page's mappings. + */ +void +pmap_remove_write(vm_page_t m) +{ + + KASSERT((m->oflags & VPO_UNMANAGED) == 0, + ("pmap_remove_write: page %p is not managed", m)); + + /* + * If the page is not exclusive busied, then PGA_WRITEABLE cannot be + * set by another thread while the object is locked. Thus, + * if PGA_WRITEABLE is clear, no page table entries need updating. + */ + VM_OBJECT_ASSERT_WLOCKED(m->object); + if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0) + pmap_clearbit(m, PVF_WRITE); +} + + +/* + * perform the pmap work for mincore + */ +int +pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) +{ + struct l2_bucket *l2b; + pt_entry_t *ptep, pte; + vm_paddr_t pa; + vm_page_t m; + int val; + boolean_t managed; + + PMAP_LOCK(pmap); +retry: + l2b = pmap_get_l2_bucket(pmap, addr); + if (l2b == NULL) { + val = 0; + goto out; + } + ptep = &l2b->l2b_kva[l2pte_index(addr)]; + pte = *ptep; + if (!l2pte_valid(pte)) { + val = 0; + goto out; + } + val = MINCORE_INCORE; + if (pte & L2_S_PROT_W) + val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; + managed = false; + pa = l2pte_pa(pte); + m = PHYS_TO_VM_PAGE(pa); + if (m != NULL && !(m->oflags & VPO_UNMANAGED)) + managed = true; + if (managed) { + /* + * The ARM pmap tries to maintain a per-mapping + * reference bit. 
The trouble is that it's kept in + * the PV entry, not the PTE, so it's costly to access + * here. You would need to acquire the pvh global + * lock, call pmap_find_pv(), and introduce a custom + * version of vm_page_pa_tryrelock() that releases and + * reacquires the pvh global lock. In the end, I + * doubt it's worthwhile. This may falsely report + * the given address as referenced. + */ + if ((m->md.pvh_attrs & PVF_REF) != 0) + val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; + } + if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != + (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { + /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ + if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) + goto retry; + } else +out: + PA_UNLOCK_COND(*locked_pa); + PMAP_UNLOCK(pmap); + return (val); +} + + +void +pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) +{ +} + + +/* + * Increase the starting virtual address of the given mapping if a + * different alignment might result in more superpage mappings. + */ +void +pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, + vm_offset_t *addr, vm_size_t size) +{ +} + +#define BOOTSTRAP_DEBUG + +/* + * pmap_map_section: + * + * Create a single section mapping. + */ +void +pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, + int prot, int cache) +{ + pd_entry_t *pde = (pd_entry_t *) l1pt; + pd_entry_t fl; + + KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2")); + + switch (cache) { + case PTE_NOCACHE: + default: + fl = 0; + break; + + case PTE_CACHE: + fl = pte_l1_s_cache_mode; + break; + + case PTE_PAGETABLE: + fl = pte_l1_s_cache_mode_pt; + break; + } + + pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | + L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); + PTE_SYNC(&pde[va >> L1_S_SHIFT]); + +} + +/* + * pmap_link_l2pt: + * + * Link the L2 page table specified by l2pv.pv_pa into the L1 + * page table at the slot for "va". 
+ */ +void +pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv) +{ + pd_entry_t *pde = (pd_entry_t *) l1pt, proto; + u_int slot = va >> L1_S_SHIFT; + + proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; + +#ifdef VERBOSE_INIT_ARM + printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va); +#endif + + pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); + + PTE_SYNC(&pde[slot]); + + SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); + + +} + +/* + * pmap_map_entry + * + * Create a single page mapping. + */ +void +pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, + int cache) +{ + pd_entry_t *pde = (pd_entry_t *) l1pt; + pt_entry_t fl; + pt_entry_t *pte; + + KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin")); + + switch (cache) { + case PTE_NOCACHE: + default: + fl = 0; + break; + + case PTE_CACHE: + fl = pte_l2_s_cache_mode; + break; + + case PTE_PAGETABLE: + fl = pte_l2_s_cache_mode_pt; + break; + } + + if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) + panic("pmap_map_entry: no L2 table for VA 0x%08x", va); + + pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); + + if (pte == NULL) + panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va); + + pte[l2pte_index(va)] = + L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; + PTE_SYNC(&pte[l2pte_index(va)]); +} + +/* + * pmap_map_chunk: + * + * Map a chunk of memory using the most efficient mappings + * possible (section. large page, small page) into the + * provided L1 and L2 tables at the specified virtual address. 
+ */ +vm_size_t +pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, + vm_size_t size, int prot, int cache) +{ + pd_entry_t *pde = (pd_entry_t *) l1pt; + pt_entry_t *pte, f1, f2s, f2l; + vm_size_t resid; + int i; + + resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); + + if (l1pt == 0) + panic("pmap_map_chunk: no L1 table provided"); + +#ifdef VERBOSE_INIT_ARM + printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x " + "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); +#endif + + switch (cache) { + case PTE_NOCACHE: + default: + f1 = 0; + f2l = 0; + f2s = 0; + break; + + case PTE_CACHE: + f1 = pte_l1_s_cache_mode; + f2l = pte_l2_l_cache_mode; + f2s = pte_l2_s_cache_mode; + break; + + case PTE_PAGETABLE: + f1 = pte_l1_s_cache_mode_pt; + f2l = pte_l2_l_cache_mode_pt; + f2s = pte_l2_s_cache_mode_pt; + break; + } + + size = resid; + + while (resid > 0) { + /* See if we can use a section mapping. */ + if (L1_S_MAPPABLE_P(va, pa, resid)) { +#ifdef VERBOSE_INIT_ARM + printf("S"); +#endif + pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | + L1_S_PROT(PTE_KERNEL, prot) | f1 | + L1_S_DOM(PMAP_DOMAIN_KERNEL); + PTE_SYNC(&pde[va >> L1_S_SHIFT]); + va += L1_S_SIZE; + pa += L1_S_SIZE; + resid -= L1_S_SIZE; + continue; + } + + /* + * Ok, we're going to use an L2 table. Make sure + * one is actually in the corresponding L1 slot + * for the current VA. + */ + if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) + panic("pmap_map_chunk: no L2 table for VA 0x%08x", va); + + pte = (pt_entry_t *) kernel_pt_lookup( + pde[L1_IDX(va)] & L1_C_ADDR_MASK); + if (pte == NULL) + panic("pmap_map_chunk: can't find L2 table for VA" + "0x%08x", va); + /* See if we can use a L2 large page mapping. 
*/ + if (L2_L_MAPPABLE_P(va, pa, resid)) { +#ifdef VERBOSE_INIT_ARM + printf("L"); +#endif + for (i = 0; i < 16; i++) { + pte[l2pte_index(va) + i] = + L2_L_PROTO | pa | + L2_L_PROT(PTE_KERNEL, prot) | f2l; + PTE_SYNC(&pte[l2pte_index(va) + i]); + } + va += L2_L_SIZE; + pa += L2_L_SIZE; + resid -= L2_L_SIZE; + continue; + } + + /* Use a small page mapping. */ +#ifdef VERBOSE_INIT_ARM + printf("P"); +#endif + pte[l2pte_index(va)] = + L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; + PTE_SYNC(&pte[l2pte_index(va)]); + va += PAGE_SIZE; + pa += PAGE_SIZE; + resid -= PAGE_SIZE; + } +#ifdef VERBOSE_INIT_ARM + printf("\n"); +#endif + return (size); + +} + +void +pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) +{ + /* + * Remember the memattr in a field that gets used to set the appropriate + * bits in the PTEs as mappings are established. + */ + m->md.pv_memattr = ma; + + /* + * It appears that this function can only be called before any mappings + * for the page are established on ARM. If this ever changes, this code + * will need to walk the pv_list and make each of the existing mappings + * uncacheable, being careful to sync caches and PTEs (and maybe + * invalidate TLB?) for any current mapping it modifies. + */ + if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL) + panic("Can't change memattr on page with existing mappings"); +} + + diff --git a/sys/arm/arm/pmap.c b/sys/arm/arm/pmap.c deleted file mode 100644 index 529e9f1d4518..000000000000 --- a/sys/arm/arm/pmap.c +++ /dev/null @@ -1,4874 +0,0 @@ -/* From: $NetBSD: pmap.c,v 1.148 2004/04/03 04:35:48 bsh Exp $ */ -/*- - * Copyright 2004 Olivier Houchard. - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. 
Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/*- - * Copyright (c) 2002-2003 Wasabi Systems, Inc. - * Copyright (c) 2001 Richard Earnshaw - * Copyright (c) 2001-2002 Christopher Gilbert - * All rights reserved. - * - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. 
Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ -/*- - * Copyright (c) 1999 The NetBSD Foundation, Inc. - * All rights reserved. - * - * This code is derived from software contributed to The NetBSD Foundation - * by Charles M. Hannum. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. 
AND CONTRIBUTORS - * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ - -/*- - * Copyright (c) 1994-1998 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Mark Brinicombe. - * 4. The name of the author may not be used to endorse or promote products - * derived from this software without specific prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR - * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES - * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, - * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT - * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF - * - * RiscBSD kernel project - * - * pmap.c - * - * Machine dependant vm stuff - * - * Created : 20/09/94 - */ - -/* - * Special compilation symbols - * PMAP_DEBUG - Build in pmap_debug_level code - * - * Note that pmap_mapdev() and pmap_unmapdev() are implemented in arm/devmap.c - */ -/* Include header files */ - -#include "opt_vm.h" - -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#ifdef PMAP_DEBUG -#define PDEBUG(_lev_,_stat_) \ - if (pmap_debug_level >= (_lev_)) \ - ((_stat_)) -#define dprintf printf - -int pmap_debug_level = 0; -#define PMAP_INLINE -#else /* PMAP_DEBUG */ -#define PDEBUG(_lev_,_stat_) /* Nothing */ -#define dprintf(x, arg...) 
-#define PMAP_INLINE __inline -#endif /* PMAP_DEBUG */ - -extern struct pv_addr systempage; - -extern int last_fault_code; - -/* - * Internal function prototypes - */ -static void pmap_free_pv_entry (pv_entry_t); -static pv_entry_t pmap_get_pv_entry(void); - -static int pmap_enter_locked(pmap_t, vm_offset_t, vm_page_t, - vm_prot_t, u_int); -static vm_paddr_t pmap_extract_locked(pmap_t pmap, vm_offset_t va); -static void pmap_fix_cache(struct vm_page *, pmap_t, vm_offset_t); -static void pmap_alloc_l1(pmap_t); -static void pmap_free_l1(pmap_t); - -static int pmap_clearbit(struct vm_page *, u_int); - -static struct l2_bucket *pmap_get_l2_bucket(pmap_t, vm_offset_t); -static struct l2_bucket *pmap_alloc_l2_bucket(pmap_t, vm_offset_t); -static void pmap_free_l2_bucket(pmap_t, struct l2_bucket *, u_int); -static vm_offset_t kernel_pt_lookup(vm_paddr_t); - -static MALLOC_DEFINE(M_VMPMAP, "pmap", "PMAP L1"); - -vm_offset_t virtual_avail; /* VA of first avail page (after kernel bss) */ -vm_offset_t virtual_end; /* VA of last avail page (end of kernel AS) */ -vm_offset_t pmap_curmaxkvaddr; -vm_paddr_t kernel_l1pa; - -vm_offset_t kernel_vm_end = 0; - -vm_offset_t vm_max_kernel_address; - -struct pmap kernel_pmap_store; - -static pt_entry_t *csrc_pte, *cdst_pte; -static vm_offset_t csrcp, cdstp, qmap_addr; -static struct mtx cmtx, qmap_mtx; - -static void pmap_init_l1(struct l1_ttable *, pd_entry_t *); -/* - * These routines are called when the CPU type is identified to set up - * the PTE prototypes, cache modes, etc. - * - * The variables are always here, just in case LKMs need to reference - * them (though, they shouldn't). 
- */ - -pt_entry_t pte_l1_s_cache_mode; -pt_entry_t pte_l1_s_cache_mode_pt; -pt_entry_t pte_l1_s_cache_mask; - -pt_entry_t pte_l2_l_cache_mode; -pt_entry_t pte_l2_l_cache_mode_pt; -pt_entry_t pte_l2_l_cache_mask; - -pt_entry_t pte_l2_s_cache_mode; -pt_entry_t pte_l2_s_cache_mode_pt; -pt_entry_t pte_l2_s_cache_mask; - -pt_entry_t pte_l2_s_prot_u; -pt_entry_t pte_l2_s_prot_w; -pt_entry_t pte_l2_s_prot_mask; - -pt_entry_t pte_l1_s_proto; -pt_entry_t pte_l1_c_proto; -pt_entry_t pte_l2_s_proto; - -void (*pmap_copy_page_func)(vm_paddr_t, vm_paddr_t); -void (*pmap_copy_page_offs_func)(vm_paddr_t a_phys, - vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, - int cnt); -void (*pmap_zero_page_func)(vm_paddr_t, int, int); - -struct msgbuf *msgbufp = 0; - -/* - * Crashdump maps. - */ -static caddr_t crashdumpmap; - -extern void bcopy_page(vm_offset_t, vm_offset_t); -extern void bzero_page(vm_offset_t); - -extern vm_offset_t alloc_firstaddr; - -char *_tmppt; - -/* - * Metadata for L1 translation tables. - */ -struct l1_ttable { - /* Entry on the L1 Table list */ - SLIST_ENTRY(l1_ttable) l1_link; - - /* Entry on the L1 Least Recently Used list */ - TAILQ_ENTRY(l1_ttable) l1_lru; - - /* Track how many domains are allocated from this L1 */ - volatile u_int l1_domain_use_count; - - /* - * A free-list of domain numbers for this L1. - * We avoid using ffs() and a bitmap to track domains since ffs() - * is slow on ARM. - */ - u_int8_t l1_domain_first; - u_int8_t l1_domain_free[PMAP_DOMAINS]; - - /* Physical address of this L1 page table */ - vm_paddr_t l1_physaddr; - - /* KVA of this L1 page table */ - pd_entry_t *l1_kva; -}; - -/* - * Convert a virtual address into its L1 table index. That is, the - * index used to locate the L2 descriptor table pointer in an L1 table. - * This is basically used to index l1->l1_kva[]. - * - * Each L2 descriptor table represents 1MB of VA space. 
- */ -#define L1_IDX(va) (((vm_offset_t)(va)) >> L1_S_SHIFT) - -/* - * L1 Page Tables are tracked using a Least Recently Used list. - * - New L1s are allocated from the HEAD. - * - Freed L1s are added to the TAIl. - * - Recently accessed L1s (where an 'access' is some change to one of - * the userland pmaps which owns this L1) are moved to the TAIL. - */ -static TAILQ_HEAD(, l1_ttable) l1_lru_list; -/* - * A list of all L1 tables - */ -static SLIST_HEAD(, l1_ttable) l1_list; -static struct mtx l1_lru_lock; - -/* - * The l2_dtable tracks L2_BUCKET_SIZE worth of L1 slots. - * - * This is normally 16MB worth L2 page descriptors for any given pmap. - * Reference counts are maintained for L2 descriptors so they can be - * freed when empty. - */ -struct l2_dtable { - /* The number of L2 page descriptors allocated to this l2_dtable */ - u_int l2_occupancy; - - /* List of L2 page descriptors */ - struct l2_bucket { - pt_entry_t *l2b_kva; /* KVA of L2 Descriptor Table */ - vm_paddr_t l2b_phys; /* Physical address of same */ - u_short l2b_l1idx; /* This L2 table's L1 index */ - u_short l2b_occupancy; /* How many active descriptors */ - } l2_bucket[L2_BUCKET_SIZE]; -}; - -/* pmap_kenter_internal flags */ -#define KENTER_CACHE 0x1 -#define KENTER_USER 0x2 - -/* - * Given an L1 table index, calculate the corresponding l2_dtable index - * and bucket index within the l2_dtable. - */ -#define L2_IDX(l1idx) (((l1idx) >> L2_BUCKET_LOG2) & \ - (L2_SIZE - 1)) -#define L2_BUCKET(l1idx) ((l1idx) & (L2_BUCKET_SIZE - 1)) - -/* - * Given a virtual address, this macro returns the - * virtual address required to drop into the next L2 bucket. - */ -#define L2_NEXT_BUCKET(va) (((va) & L1_S_FRAME) + L1_S_SIZE) - -/* - * We try to map the page tables write-through, if possible. However, not - * all CPUs have a write-through cache mode, so on those we have to sync - * the cache when we frob page tables. - * - * We try to evaluate this at compile time, if possible. 
However, it's - * not always possible to do that, hence this run-time var. - */ -int pmap_needs_pte_sync; - -/* - * Macro to determine if a mapping might be resident in the - * instruction cache and/or TLB - */ -#define PV_BEEN_EXECD(f) (((f) & (PVF_REF | PVF_EXEC)) == (PVF_REF | PVF_EXEC)) - -/* - * Macro to determine if a mapping might be resident in the - * data cache and/or TLB - */ -#define PV_BEEN_REFD(f) (((f) & PVF_REF) != 0) - -#ifndef PMAP_SHPGPERPROC -#define PMAP_SHPGPERPROC 200 -#endif - -#define pmap_is_current(pm) ((pm) == kernel_pmap || \ - curproc->p_vmspace->vm_map.pmap == (pm)) -static uma_zone_t pvzone = NULL; -uma_zone_t l2zone; -static uma_zone_t l2table_zone; -static vm_offset_t pmap_kernel_l2dtable_kva; -static vm_offset_t pmap_kernel_l2ptp_kva; -static vm_paddr_t pmap_kernel_l2ptp_phys; -static int pv_entry_count=0, pv_entry_max=0, pv_entry_high_water=0; -static struct rwlock pvh_global_lock; - -void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#if ARM_MMU_XSCALE == 1 -void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#endif - -/* - * This list exists for the benefit of pmap_map_chunk(). It keeps track - * of the kernel L2 tables during bootstrap, so that pmap_map_chunk() can - * find them as necessary. - * - * Note that the data on this list MUST remain valid after initarm() returns, - * as pmap_bootstrap() uses it to contruct L2 table metadata. - */ -SLIST_HEAD(, pv_addr) kernel_pt_list = SLIST_HEAD_INITIALIZER(kernel_pt_list); - -static void -pmap_init_l1(struct l1_ttable *l1, pd_entry_t *l1pt) -{ - int i; - - l1->l1_kva = l1pt; - l1->l1_domain_use_count = 0; - l1->l1_domain_first = 0; - - for (i = 0; i < PMAP_DOMAINS; i++) - l1->l1_domain_free[i] = i + 1; - - /* - * Copy the kernel's L1 entries to each new L1. 
- */ - if (l1pt != kernel_pmap->pm_l1->l1_kva) - memcpy(l1pt, kernel_pmap->pm_l1->l1_kva, L1_TABLE_SIZE); - - if ((l1->l1_physaddr = pmap_extract(kernel_pmap, (vm_offset_t)l1pt)) == 0) - panic("pmap_init_l1: can't get PA of L1 at %p", l1pt); - SLIST_INSERT_HEAD(&l1_list, l1, l1_link); - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); -} - -static vm_offset_t -kernel_pt_lookup(vm_paddr_t pa) -{ - struct pv_addr *pv; - - SLIST_FOREACH(pv, &kernel_pt_list, pv_list) { - if (pv->pv_pa == pa) - return (pv->pv_va); - } - return (0); -} - -#if ARM_MMU_GENERIC != 0 -void -pmap_pte_init_generic(void) -{ - - pte_l1_s_cache_mode = L1_S_B|L1_S_C; - pte_l1_s_cache_mask = L1_S_CACHE_MASK_generic; - - pte_l2_l_cache_mode = L2_B|L2_C; - pte_l2_l_cache_mask = L2_L_CACHE_MASK_generic; - - pte_l2_s_cache_mode = L2_B|L2_C; - pte_l2_s_cache_mask = L2_S_CACHE_MASK_generic; - - /* - * If we have a write-through cache, set B and C. If - * we have a write-back cache, then we assume setting - * only C will make those pages write-through. 
- */ - if (cpufuncs.cf_dcache_wb_range == (void *) cpufunc_nullop) { - pte_l1_s_cache_mode_pt = L1_S_B|L1_S_C; - pte_l2_l_cache_mode_pt = L2_B|L2_C; - pte_l2_s_cache_mode_pt = L2_B|L2_C; - } else { - pte_l1_s_cache_mode_pt = L1_S_C; - pte_l2_l_cache_mode_pt = L2_C; - pte_l2_s_cache_mode_pt = L2_C; - } - - pte_l2_s_prot_u = L2_S_PROT_U_generic; - pte_l2_s_prot_w = L2_S_PROT_W_generic; - pte_l2_s_prot_mask = L2_S_PROT_MASK_generic; - - pte_l1_s_proto = L1_S_PROTO_generic; - pte_l1_c_proto = L1_C_PROTO_generic; - pte_l2_s_proto = L2_S_PROTO_generic; - - pmap_copy_page_func = pmap_copy_page_generic; - pmap_copy_page_offs_func = pmap_copy_page_offs_generic; - pmap_zero_page_func = pmap_zero_page_generic; -} - -#endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) -static u_int xscale_use_minidata; -#endif - -void -pmap_pte_init_xscale(void) -{ - uint32_t auxctl; - int write_through = 0; - - pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; - pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; - - pte_l2_l_cache_mode = L2_B|L2_C; - pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; - - pte_l2_s_cache_mode = L2_B|L2_C; - pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; - - pte_l1_s_cache_mode_pt = L1_S_C; - pte_l2_l_cache_mode_pt = L2_C; - pte_l2_s_cache_mode_pt = L2_C; -#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE - /* - * The XScale core has an enhanced mode where writes that - * miss the cache cause a cache line to be allocated. This - * is significantly faster than the traditional, write-through - * behavior of this case. - */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); -#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ -#ifdef XSCALE_CACHE_WRITE_THROUGH - /* - * Some versions of the XScale core have various bugs in - * their cache units, the work-around for which is to run - * the cache in write-through mode. 
Unfortunately, this - * has a major (negative) impact on performance. So, we - * go ahead and run fast-and-loose, in the hopes that we - * don't line up the planets in a way that will trip the - * bugs. - * - * However, we give you the option to be slow-but-correct. - */ - write_through = 1; -#elif defined(XSCALE_CACHE_WRITE_BACK) - /* force write back cache mode */ - write_through = 0; -#elif defined(CPU_XSCALE_PXA2X0) - /* - * Intel PXA2[15]0 processors are known to have a bug in - * write-back cache on revision 4 and earlier (stepping - * A[01] and B[012]). Fixed for C0 and later. - */ - { - uint32_t id, type; - - id = cpu_ident(); - type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); - - if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { - if ((id & CPU_ID_REVISION_MASK) < 5) { - /* write through for stepping A0-1 and B0-2 */ - write_through = 1; - } - } - } -#endif /* XSCALE_CACHE_WRITE_THROUGH */ - - if (write_through) { - pte_l1_s_cache_mode = L1_S_C; - pte_l2_l_cache_mode = L2_C; - pte_l2_s_cache_mode = L2_C; - } - -#if (ARM_NMMUS > 1) - xscale_use_minidata = 1; -#endif - - pte_l2_s_prot_u = L2_S_PROT_U_xscale; - pte_l2_s_prot_w = L2_S_PROT_W_xscale; - pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; - - pte_l1_s_proto = L1_S_PROTO_xscale; - pte_l1_c_proto = L1_C_PROTO_xscale; - pte_l2_s_proto = L2_S_PROTO_xscale; - -#ifdef CPU_XSCALE_CORE3 - pmap_copy_page_func = pmap_copy_page_generic; - pmap_copy_page_offs_func = pmap_copy_page_offs_generic; - pmap_zero_page_func = pmap_zero_page_generic; - xscale_use_minidata = 0; - /* Make sure it is L2-cachable */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T); - pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P; - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ; - pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode; - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T); - pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; - -#else - pmap_copy_page_func = pmap_copy_page_xscale; - 
pmap_copy_page_offs_func = pmap_copy_page_offs_xscale; - pmap_zero_page_func = pmap_zero_page_xscale; -#endif - - /* - * Disable ECC protection of page table access, for now. - */ - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl &= ~XSCALE_AUXCTL_P; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} - -/* - * xscale_setup_minidata: - * - * Set up the mini-data cache clean area. We require the - * caller to allocate the right amount of physically and - * virtually contiguous space. - */ -extern vm_offset_t xscale_minidata_clean_addr; -extern vm_size_t xscale_minidata_clean_size; /* already initialized */ -void -xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t *pte; - vm_size_t size; - uint32_t auxctl; - - xscale_minidata_clean_addr = va; - - /* Round it to page size. */ - size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; - - for (; size != 0; - va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { - pte = (pt_entry_t *) kernel_pt_lookup( - pde[L1_IDX(va)] & L1_C_ADDR_MASK); - if (pte == NULL) - panic("xscale_setup_minidata: can't find L2 table for " - "VA 0x%08x", (u_int32_t) va); - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - } - - /* - * Configure the mini-data cache for write-back with - * read/write-allocate. - * - * NOTE: In order to reconfigure the mini-data cache, we must - * make sure it contains no valid data! In order to do that, - * we must issue a global data cache invalidate command! - * - * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! - * THIS IS VERY IMPORTANT! - */ - - /* Invalidate data and mini-data. 
*/ - __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} -#endif - -/* - * Allocate an L1 translation table for the specified pmap. - * This is called at pmap creation time. - */ -static void -pmap_alloc_l1(pmap_t pm) -{ - struct l1_ttable *l1; - u_int8_t domain; - - /* - * Remove the L1 at the head of the LRU list - */ - mtx_lock(&l1_lru_lock); - l1 = TAILQ_FIRST(&l1_lru_list); - TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); - - /* - * Pick the first available domain number, and update - * the link to the next number. - */ - domain = l1->l1_domain_first; - l1->l1_domain_first = l1->l1_domain_free[domain]; - - /* - * If there are still free domain numbers in this L1, - * put it back on the TAIL of the LRU list. - */ - if (++l1->l1_domain_use_count < PMAP_DOMAINS) - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); - - mtx_unlock(&l1_lru_lock); - - /* - * Fix up the relevant bits in the pmap structure - */ - pm->pm_l1 = l1; - pm->pm_domain = domain + 1; -} - -/* - * Free an L1 translation table. - * This is called at pmap destruction time. - */ -static void -pmap_free_l1(pmap_t pm) -{ - struct l1_ttable *l1 = pm->pm_l1; - - mtx_lock(&l1_lru_lock); - - /* - * If this L1 is currently on the LRU list, remove it. - */ - if (l1->l1_domain_use_count < PMAP_DOMAINS) - TAILQ_REMOVE(&l1_lru_list, l1, l1_lru); - - /* - * Free up the domain number which was allocated to the pmap - */ - l1->l1_domain_free[pm->pm_domain - 1] = l1->l1_domain_first; - l1->l1_domain_first = pm->pm_domain - 1; - l1->l1_domain_use_count--; - - /* - * The L1 now must have at least 1 free domain, so add - * it back to the LRU list. If the use count is zero, - * put it at the head of the list, otherwise it goes - * to the tail. 
- */ - if (l1->l1_domain_use_count == 0) { - TAILQ_INSERT_HEAD(&l1_lru_list, l1, l1_lru); - } else - TAILQ_INSERT_TAIL(&l1_lru_list, l1, l1_lru); - - mtx_unlock(&l1_lru_lock); -} - -/* - * Returns a pointer to the L2 bucket associated with the specified pmap - * and VA, or NULL if no L2 bucket exists for the address. - */ -static PMAP_INLINE struct l2_bucket * -pmap_get_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - u_short l1idx; - - l1idx = L1_IDX(va); - - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL || - (l2b = &l2->l2_bucket[L2_BUCKET(l1idx)])->l2b_kva == NULL) - return (NULL); - - return (l2b); -} - -/* - * Returns a pointer to the L2 bucket associated with the specified pmap - * and VA. - * - * If no L2 bucket exists, perform the necessary allocations to put an L2 - * bucket/page table in place. - * - * Note that if a new L2 bucket/page was allocated, the caller *must* - * increment the bucket occupancy counter appropriately *before* - * releasing the pmap's lock to ensure no other thread or cpu deallocates - * the bucket/page in the meantime. - */ -static struct l2_bucket * -pmap_alloc_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - u_short l1idx; - - l1idx = L1_IDX(va); - - PMAP_ASSERT_LOCKED(pm); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { - /* - * No mapping at this address, as there is - * no entry in the L1 table. - * Need to allocate a new l2_dtable. - */ - PMAP_UNLOCK(pm); - rw_wunlock(&pvh_global_lock); - if ((l2 = uma_zalloc(l2table_zone, M_NOWAIT)) == NULL) { - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - return (NULL); - } - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - if (pm->pm_l2[L2_IDX(l1idx)] != NULL) { - /* - * Someone already allocated the l2_dtable while - * we were doing the same. 
- */ - uma_zfree(l2table_zone, l2); - l2 = pm->pm_l2[L2_IDX(l1idx)]; - } else { - bzero(l2, sizeof(*l2)); - /* - * Link it into the parent pmap - */ - pm->pm_l2[L2_IDX(l1idx)] = l2; - } - } - - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - - /* - * Fetch pointer to the L2 page table associated with the address. - */ - if (l2b->l2b_kva == NULL) { - pt_entry_t *ptep; - - /* - * No L2 page table has been allocated. Chances are, this - * is because we just allocated the l2_dtable, above. - */ - l2->l2_occupancy++; - PMAP_UNLOCK(pm); - rw_wunlock(&pvh_global_lock); - ptep = uma_zalloc(l2zone, M_NOWAIT); - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - if (l2b->l2b_kva != 0) { - /* We lost the race. */ - l2->l2_occupancy--; - uma_zfree(l2zone, ptep); - return (l2b); - } - l2b->l2b_phys = vtophys(ptep); - if (ptep == NULL) { - /* - * Oops, no more L2 page tables available at this - * time. We may need to deallocate the l2_dtable - * if we allocated a new one above. - */ - l2->l2_occupancy--; - if (l2->l2_occupancy == 0) { - pm->pm_l2[L2_IDX(l1idx)] = NULL; - uma_zfree(l2table_zone, l2); - } - return (NULL); - } - - l2b->l2b_kva = ptep; - l2b->l2b_l1idx = l1idx; - } - - return (l2b); -} - -static PMAP_INLINE void -#ifndef PMAP_INCLUDE_PTE_SYNC -pmap_free_l2_ptp(pt_entry_t *l2) -#else -pmap_free_l2_ptp(boolean_t need_sync, pt_entry_t *l2) -#endif -{ -#ifdef PMAP_INCLUDE_PTE_SYNC - /* - * Note: With a write-back cache, we may need to sync this - * L2 table before re-using it. - * This is because it may have belonged to a non-current - * pmap, in which case the cache syncs would have been - * skipped when the pages were being unmapped. If the - * L2 table were then to be immediately re-allocated to - * the *current* pmap, it may well contain stale mappings - * which have not yet been cleared by a cache write-back - * and so would still be visible to the mmu. 
- */ - if (need_sync) - PTE_SYNC_RANGE(l2, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); -#endif - uma_zfree(l2zone, l2); -} -/* - * One or more mappings in the specified L2 descriptor table have just been - * invalidated. - * - * Garbage collect the metadata and descriptor table itself if necessary. - * - * The pmap lock must be acquired when this is called (not necessary - * for the kernel pmap). - */ -static void -pmap_free_l2_bucket(pmap_t pm, struct l2_bucket *l2b, u_int count) -{ - struct l2_dtable *l2; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep; - u_short l1idx; - - - /* - * Update the bucket's reference count according to how many - * PTEs the caller has just invalidated. - */ - l2b->l2b_occupancy -= count; - - /* - * Note: - * - * Level 2 page tables allocated to the kernel pmap are never freed - * as that would require checking all Level 1 page tables and - * removing any references to the Level 2 page table. See also the - * comment elsewhere about never freeing bootstrap L2 descriptors. - * - * We make do with just invalidating the mapping in the L2 table. - * - * This isn't really a big deal in practice and, in fact, leads - * to a performance win over time as we don't need to continually - * alloc/free. - */ - if (l2b->l2b_occupancy > 0 || pm == kernel_pmap) - return; - - /* - * There are no more valid mappings in this level 2 page table. - * Go ahead and NULL-out the pointer in the bucket, then - * free the page table. - */ - l1idx = l2b->l2b_l1idx; - ptep = l2b->l2b_kva; - l2b->l2b_kva = NULL; - - pl1pd = &pm->pm_l1->l1_kva[l1idx]; - - /* - * If the L1 slot matches the pmap's domain - * number, then invalidate it. - */ - l1pd = *pl1pd & (L1_TYPE_MASK | L1_C_DOM_MASK); - if (l1pd == (L1_C_DOM(pm->pm_domain) | L1_TYPE_C)) { - *pl1pd = 0; - PTE_SYNC(pl1pd); - } - - /* - * Release the L2 descriptor table back to the pool cache. 
- */ -#ifndef PMAP_INCLUDE_PTE_SYNC - pmap_free_l2_ptp(ptep); -#else - pmap_free_l2_ptp(!pmap_is_current(pm), ptep); -#endif - - /* - * Update the reference count in the associated l2_dtable - */ - l2 = pm->pm_l2[L2_IDX(l1idx)]; - if (--l2->l2_occupancy > 0) - return; - - /* - * There are no more valid mappings in any of the Level 1 - * slots managed by this l2_dtable. Go ahead and NULL-out - * the pointer in the parent pmap and free the l2_dtable. - */ - pm->pm_l2[L2_IDX(l1idx)] = NULL; - uma_zfree(l2table_zone, l2); -} - -/* - * Pool cache constructors for L2 descriptor tables, metadata and pmap - * structures. - */ -static int -pmap_l2ptp_ctor(void *mem, int size, void *arg, int flags) -{ -#ifndef PMAP_INCLUDE_PTE_SYNC - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - - vm_offset_t va = (vm_offset_t)mem & ~PAGE_MASK; - - /* - * The mappings for these page tables were initially made using - * pmap_kenter() by the pool subsystem. Therefore, the cache- - * mode will not be right for page table mappings. To avoid - * polluting the pmap_kenter() code with a special case for - * page tables, we simply fix up the cache-mode here if it's not - * correct. - */ - l2b = pmap_get_l2_bucket(kernel_pmap, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - - if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { - /* - * Page tables must have the cache-mode set to - * Write-Thru. - */ - *ptep = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(va); - cpu_cpwait(); - } -#endif - memset(mem, 0, L2_TABLE_SIZE_REAL); - PTE_SYNC_RANGE(mem, L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); - return (0); -} - -/* - * A bunch of routines to conditionally flush the caches/TLB depending - * on whether the specified pmap actually needs to be flushed at any - * given time. 
- */ -static PMAP_INLINE void -pmap_tlb_flushID_SE(pmap_t pm, vm_offset_t va) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushID_SE(va); -} - -static PMAP_INLINE void -pmap_tlb_flushD_SE(pmap_t pm, vm_offset_t va) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushD_SE(va); -} - -static PMAP_INLINE void -pmap_tlb_flushID(pmap_t pm) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushID(); -} -static PMAP_INLINE void -pmap_tlb_flushD(pmap_t pm) -{ - - if (pmap_is_current(pm)) - cpu_tlb_flushD(); -} - -static int -pmap_has_valid_mapping(pmap_t pm, vm_offset_t va) -{ - pd_entry_t *pde; - pt_entry_t *ptep; - - if (pmap_get_pde_pte(pm, va, &pde, &ptep) && - ptep && ((*ptep & L2_TYPE_MASK) != L2_TYPE_INV)) - return (1); - - return (0); -} - -static PMAP_INLINE void -pmap_idcache_wbinv_range(pmap_t pm, vm_offset_t va, vm_size_t len) -{ - vm_size_t rest; - - CTR4(KTR_PMAP, "pmap_dcache_wbinv_range: pmap %p is_kernel %d va 0x%08x" - " len 0x%x ", pm, pm == kernel_pmap, va, len); - - if (pmap_is_current(pm) || pm == kernel_pmap) { - rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); - while (len > 0) { - if (pmap_has_valid_mapping(pm, va)) { - cpu_idcache_wbinv_range(va, rest); - cpu_l2cache_wbinv_range(va, rest); - } - len -= rest; - va += rest; - rest = MIN(PAGE_SIZE, len); - } - } -} - -static PMAP_INLINE void -pmap_dcache_wb_range(pmap_t pm, vm_offset_t va, vm_size_t len, boolean_t do_inv, - boolean_t rd_only) -{ - vm_size_t rest; - - CTR4(KTR_PMAP, "pmap_dcache_wb_range: pmap %p is_kernel %d va 0x%08x " - "len 0x%x ", pm, pm == kernel_pmap, va, len); - CTR2(KTR_PMAP, " do_inv %d rd_only %d", do_inv, rd_only); - - if (pmap_is_current(pm)) { - rest = MIN(PAGE_SIZE - (va & PAGE_MASK), len); - while (len > 0) { - if (pmap_has_valid_mapping(pm, va)) { - if (do_inv && rd_only) { - cpu_dcache_inv_range(va, rest); - cpu_l2cache_inv_range(va, rest); - } else if (do_inv) { - cpu_dcache_wbinv_range(va, rest); - cpu_l2cache_wbinv_range(va, rest); - } else if (!rd_only) { - 
cpu_dcache_wb_range(va, rest); - cpu_l2cache_wb_range(va, rest); - } - } - len -= rest; - va += rest; - - rest = MIN(PAGE_SIZE, len); - } - } -} - -static PMAP_INLINE void -pmap_idcache_wbinv_all(pmap_t pm) -{ - - if (pmap_is_current(pm)) { - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - } -} - -#ifdef notyet -static PMAP_INLINE void -pmap_dcache_wbinv_all(pmap_t pm) -{ - - if (pmap_is_current(pm)) { - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - } -} -#endif - -/* - * PTE_SYNC_CURRENT: - * - * Make sure the pte is written out to RAM. - * We need to do this for one of two cases: - * - We're dealing with the kernel pmap - * - There is no pmap active in the cache/tlb. - * - The specified pmap is 'active' in the cache/tlb. - */ -#ifdef PMAP_INCLUDE_PTE_SYNC -#define PTE_SYNC_CURRENT(pm, ptep) \ -do { \ - if (PMAP_NEEDS_PTE_SYNC && \ - pmap_is_current(pm)) \ - PTE_SYNC(ptep); \ -} while (/*CONSTCOND*/0) -#else -#define PTE_SYNC_CURRENT(pm, ptep) /* nothing */ -#endif - -/* - * cacheable == -1 means we must make the entry uncacheable, 1 means - * cacheable; - */ -static __inline void -pmap_set_cache_entry(pv_entry_t pv, pmap_t pm, vm_offset_t va, int cacheable) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - - l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); - ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - - if (cacheable == 1) { - pte = (*ptep & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode; - if (l2pte_valid(pte)) { - if (PV_BEEN_EXECD(pv->pv_flags)) { - pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); - } else if (PV_BEEN_REFD(pv->pv_flags)) { - pmap_tlb_flushD_SE(pv->pv_pmap, pv->pv_va); - } - } - } else { - pte = *ptep &~ L2_S_CACHE_MASK; - if ((va != pv->pv_va || pm != pv->pv_pmap) && - l2pte_valid(pte)) { - if (PV_BEEN_EXECD(pv->pv_flags)) { - pmap_idcache_wbinv_range(pv->pv_pmap, - pv->pv_va, PAGE_SIZE); - pmap_tlb_flushID_SE(pv->pv_pmap, pv->pv_va); - } else if (PV_BEEN_REFD(pv->pv_flags)) { - pmap_dcache_wb_range(pv->pv_pmap, - pv->pv_va, 
PAGE_SIZE, TRUE, - (pv->pv_flags & PVF_WRITE) == 0); - pmap_tlb_flushD_SE(pv->pv_pmap, - pv->pv_va); - } - } - } - *ptep = pte; - PTE_SYNC_CURRENT(pv->pv_pmap, ptep); -} - -static void -pmap_fix_cache(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - int pmwc = 0; - int writable = 0, kwritable = 0, uwritable = 0; - int entries = 0, kentries = 0, uentries = 0; - struct pv_entry *pv; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - - /* the cache gets written back/invalidated on context switch. - * therefore, if a user page shares an entry in the same page or - * with the kernel map and at least one is writable, then the - * cache entry must be set write-through. - */ - - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - /* generate a count of the pv_entry uses */ - if (pv->pv_flags & PVF_WRITE) { - if (pv->pv_pmap == kernel_pmap) - kwritable++; - else if (pv->pv_pmap == pm) - uwritable++; - writable++; - } - if (pv->pv_pmap == kernel_pmap) - kentries++; - else { - if (pv->pv_pmap == pm) - uentries++; - entries++; - } - } - /* - * check if the user duplicate mapping has - * been removed. 
- */ - if ((pm != kernel_pmap) && (((uentries > 1) && uwritable) || - (uwritable > 1))) - pmwc = 1; - - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - /* check for user uncachable conditions - order is important */ - if (pm != kernel_pmap && - (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap)) { - - if ((uentries > 1 && uwritable) || uwritable > 1) { - - /* user duplicate mapping */ - if (pv->pv_pmap != kernel_pmap) - pv->pv_flags |= PVF_MWC; - - if (!(pv->pv_flags & PVF_NC)) { - pv->pv_flags |= PVF_NC; - pmap_set_cache_entry(pv, pm, va, -1); - } - continue; - } else /* no longer a duplicate user */ - pv->pv_flags &= ~PVF_MWC; - } - - /* - * check for kernel uncachable conditions - * kernel writable or kernel readable with writable user entry - */ - if ((kwritable && (entries || kentries > 1)) || - (kwritable > 1) || - ((kwritable != writable) && kentries && - (pv->pv_pmap == kernel_pmap || - (pv->pv_flags & PVF_WRITE) || - (pv->pv_flags & PVF_MWC)))) { - - if (!(pv->pv_flags & PVF_NC)) { - pv->pv_flags |= PVF_NC; - pmap_set_cache_entry(pv, pm, va, -1); - } - continue; - } - - /* kernel and user are cachable */ - if ((pm == kernel_pmap) && !(pv->pv_flags & PVF_MWC) && - (pv->pv_flags & PVF_NC)) { - - pv->pv_flags &= ~PVF_NC; - if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - pmap_set_cache_entry(pv, pm, va, 1); - continue; - } - /* user is no longer sharable and writable */ - if (pm != kernel_pmap && - (pv->pv_pmap == pm || pv->pv_pmap == kernel_pmap) && - !pmwc && (pv->pv_flags & PVF_NC)) { - - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - if (pg->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - pmap_set_cache_entry(pv, pm, va, 1); - } - } - - if ((kwritable == 0) && (writable == 0)) { - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - return; - } -} - -/* - * Modify pte bits for all ptes corresponding to the given physical address. 
- * We use `maskbits' rather than `clearbits' because we're always passing - * constants and the latter would require an extra inversion at run-time. - */ -static int -pmap_clearbit(struct vm_page *pg, u_int maskbits) -{ - struct l2_bucket *l2b; - struct pv_entry *pv; - pt_entry_t *ptep, npte, opte; - pmap_t pm; - vm_offset_t va; - u_int oflags; - int count = 0; - - rw_wlock(&pvh_global_lock); - - if (maskbits & PVF_WRITE) - maskbits |= PVF_MOD; - /* - * Clear saved attributes (modify, reference) - */ - pg->md.pvh_attrs &= ~(maskbits & (PVF_MOD | PVF_REF)); - - if (TAILQ_EMPTY(&pg->md.pv_list)) { - rw_wunlock(&pvh_global_lock); - return (0); - } - - /* - * Loop over all current mappings setting/clearing as appropos - */ - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) { - va = pv->pv_va; - pm = pv->pv_pmap; - oflags = pv->pv_flags; - - if (!(oflags & maskbits)) { - if ((maskbits & PVF_WRITE) && (pv->pv_flags & PVF_NC)) { - if (pg->md.pv_memattr != - VM_MEMATTR_UNCACHEABLE) { - PMAP_LOCK(pm); - l2b = pmap_get_l2_bucket(pm, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - *ptep |= pte_l2_s_cache_mode; - PTE_SYNC(ptep); - PMAP_UNLOCK(pm); - } - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - } - continue; - } - pv->pv_flags &= ~maskbits; - - PMAP_LOCK(pm); - - l2b = pmap_get_l2_bucket(pm, va); - - ptep = &l2b->l2b_kva[l2pte_index(va)]; - npte = opte = *ptep; - - if (maskbits & (PVF_WRITE|PVF_MOD)) { - if ((pv->pv_flags & PVF_NC)) { - /* - * Entry is not cacheable: - * - * Don't turn caching on again if this is a - * modified emulation. This would be - * inconsitent with the settings created by - * pmap_fix_cache(). Otherwise, it's safe - * to re-enable cacheing. - * - * There's no need to call pmap_fix_cache() - * here: all pages are losing their write - * permission. 
- */ - if (maskbits & PVF_WRITE) { - if (pg->md.pv_memattr != - VM_MEMATTR_UNCACHEABLE) - npte |= pte_l2_s_cache_mode; - pv->pv_flags &= ~(PVF_NC | PVF_MWC); - } - } else - if (opte & L2_S_PROT_W) { - vm_page_dirty(pg); - /* - * Entry is writable/cacheable: check if pmap - * is current if it is flush it, otherwise it - * won't be in the cache - */ - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, pv->pv_va, - PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, pv->pv_va, - PAGE_SIZE, - (maskbits & PVF_REF) ? TRUE : FALSE, - FALSE); - } - - /* make the pte read only */ - npte &= ~L2_S_PROT_W; - } - - if (maskbits & PVF_REF) { - if ((pv->pv_flags & PVF_NC) == 0 && - (maskbits & (PVF_WRITE|PVF_MOD)) == 0) { - /* - * Check npte here; we may have already - * done the wbinv above, and the validity - * of the PTE is the same for opte and - * npte. - */ - if (npte & L2_S_PROT_W) { - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, - pv->pv_va, PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, - pv->pv_va, PAGE_SIZE, - TRUE, FALSE); - } else - if ((npte & L2_TYPE_MASK) != L2_TYPE_INV) { - /* XXXJRT need idcache_inv_range */ - if (PV_BEEN_EXECD(oflags)) - pmap_idcache_wbinv_range(pm, - pv->pv_va, PAGE_SIZE); - else - if (PV_BEEN_REFD(oflags)) - pmap_dcache_wb_range(pm, - pv->pv_va, PAGE_SIZE, - TRUE, TRUE); - } - } - - /* - * Make the PTE invalid so that we will take a - * page fault the next time the mapping is - * referenced. - */ - npte &= ~L2_TYPE_MASK; - npte |= L2_TYPE_INV; - } - - if (npte != opte) { - count++; - *ptep = npte; - PTE_SYNC(ptep); - /* Flush the TLB entry if a current pmap. 
*/ - if (PV_BEEN_EXECD(oflags)) - pmap_tlb_flushID_SE(pm, pv->pv_va); - else - if (PV_BEEN_REFD(oflags)) - pmap_tlb_flushD_SE(pm, pv->pv_va); - } - - PMAP_UNLOCK(pm); - - } - - if (maskbits & PVF_WRITE) - vm_page_aflag_clear(pg, PGA_WRITEABLE); - rw_wunlock(&pvh_global_lock); - return (count); -} - -/* - * main pv_entry manipulation functions: - * pmap_enter_pv: enter a mapping onto a vm_page list - * pmap_remove_pv: remove a mappiing from a vm_page list - * - * NOTE: pmap_enter_pv expects to lock the pvh itself - * pmap_remove_pv expects the caller to lock the pvh before calling - */ - -/* - * pmap_enter_pv: enter a mapping onto a vm_page's PV list - * - * => caller should hold the proper lock on pvh_global_lock - * => caller should have pmap locked - * => we will (someday) gain the lock on the vm_page's PV list - * => caller should adjust ptp's wire_count before calling - * => caller should not adjust pmap's wire_count - */ -static void -pmap_enter_pv(struct vm_page *pg, struct pv_entry *pve, pmap_t pm, - vm_offset_t va, u_int flags) -{ - - rw_assert(&pvh_global_lock, RA_WLOCKED); - PMAP_ASSERT_LOCKED(pm); - if (pg->md.pv_kva != 0) { - pve->pv_pmap = kernel_pmap; - pve->pv_va = pg->md.pv_kva; - pve->pv_flags = PVF_WRITE | PVF_UNMAN; - if (pm != kernel_pmap) - PMAP_LOCK(kernel_pmap); - TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); - TAILQ_INSERT_HEAD(&kernel_pmap->pm_pvlist, pve, pv_plist); - if (pm != kernel_pmap) - PMAP_UNLOCK(kernel_pmap); - pg->md.pv_kva = 0; - if ((pve = pmap_get_pv_entry()) == NULL) - panic("pmap_kenter_pv: no pv entries"); - } - pve->pv_pmap = pm; - pve->pv_va = va; - pve->pv_flags = flags; - TAILQ_INSERT_HEAD(&pg->md.pv_list, pve, pv_list); - TAILQ_INSERT_HEAD(&pm->pm_pvlist, pve, pv_plist); - pg->md.pvh_attrs |= flags & (PVF_REF | PVF_MOD); - if (pve->pv_flags & PVF_WIRED) - ++pm->pm_stats.wired_count; - vm_page_aflag_set(pg, PGA_REFERENCED); -} - -/* - * - * pmap_find_pv: Find a pv entry - * - * => caller should hold lock on vm_page 
- */ -static PMAP_INLINE struct pv_entry * -pmap_find_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - struct pv_entry *pv; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - TAILQ_FOREACH(pv, &pg->md.pv_list, pv_list) - if (pm == pv->pv_pmap && va == pv->pv_va) - break; - return (pv); -} - -/* - * vector_page_setprot: - * - * Manipulate the protection of the vector page. - */ -void -vector_page_setprot(int prot) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep; - - l2b = pmap_get_l2_bucket(kernel_pmap, vector_page); - - ptep = &l2b->l2b_kva[l2pte_index(vector_page)]; - - *ptep = (*ptep & ~L1_S_PROT_MASK) | L2_S_PROT(PTE_KERNEL, prot); - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(vector_page); - cpu_cpwait(); -} - -/* - * pmap_remove_pv: try to remove a mapping from a pv_list - * - * => caller should hold proper lock on pmap_main_lock - * => pmap should be locked - * => caller should hold lock on vm_page [so that attrs can be adjusted] - * => caller should adjust ptp's wire_count and free PTP if needed - * => caller should NOT adjust pmap's wire_count - * => we return the removed pve - */ - -static void -pmap_nuke_pv(struct vm_page *pg, pmap_t pm, struct pv_entry *pve) -{ - - struct pv_entry *pv; - rw_assert(&pvh_global_lock, RA_WLOCKED); - PMAP_ASSERT_LOCKED(pm); - TAILQ_REMOVE(&pg->md.pv_list, pve, pv_list); - TAILQ_REMOVE(&pm->pm_pvlist, pve, pv_plist); - if (pve->pv_flags & PVF_WIRED) - --pm->pm_stats.wired_count; - if (pg->md.pvh_attrs & PVF_MOD) - vm_page_dirty(pg); - if (TAILQ_FIRST(&pg->md.pv_list) == NULL) - pg->md.pvh_attrs &= ~PVF_REF; - else - vm_page_aflag_set(pg, PGA_REFERENCED); - if ((pve->pv_flags & PVF_NC) && ((pm == kernel_pmap) || - (pve->pv_flags & PVF_WRITE) || !(pve->pv_flags & PVF_MWC))) - pmap_fix_cache(pg, pm, 0); - else if (pve->pv_flags & PVF_WRITE) { - TAILQ_FOREACH(pve, &pg->md.pv_list, pv_list) - if (pve->pv_flags & PVF_WRITE) - break; - if (!pve) { - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - } - } - pv = 
TAILQ_FIRST(&pg->md.pv_list); - if (pv != NULL && (pv->pv_flags & PVF_UNMAN) && - TAILQ_NEXT(pv, pv_list) == NULL) { - pm = kernel_pmap; - pg->md.pv_kva = pv->pv_va; - /* a recursive pmap_nuke_pv */ - TAILQ_REMOVE(&pg->md.pv_list, pv, pv_list); - TAILQ_REMOVE(&pm->pm_pvlist, pv, pv_plist); - if (pv->pv_flags & PVF_WIRED) - --pm->pm_stats.wired_count; - pg->md.pvh_attrs &= ~PVF_REF; - pg->md.pvh_attrs &= ~PVF_MOD; - vm_page_aflag_clear(pg, PGA_WRITEABLE); - pmap_free_pv_entry(pv); - } -} - -static struct pv_entry * -pmap_remove_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va) -{ - struct pv_entry *pve; - - rw_assert(&pvh_global_lock, RA_WLOCKED); - pve = TAILQ_FIRST(&pg->md.pv_list); - - while (pve) { - if (pve->pv_pmap == pm && pve->pv_va == va) { /* match? */ - pmap_nuke_pv(pg, pm, pve); - break; - } - pve = TAILQ_NEXT(pve, pv_list); - } - - if (pve == NULL && pg->md.pv_kva == va) - pg->md.pv_kva = 0; - - return(pve); /* return removed pve */ -} -/* - * - * pmap_modify_pv: Update pv flags - * - * => caller should hold lock on vm_page [so that attrs can be adjusted] - * => caller should NOT adjust pmap's wire_count - * => we return the old flags - * - * Modify a physical-virtual mapping in the pv table - */ -static u_int -pmap_modify_pv(struct vm_page *pg, pmap_t pm, vm_offset_t va, - u_int clr_mask, u_int set_mask) -{ - struct pv_entry *npv; - u_int flags, oflags; - - PMAP_ASSERT_LOCKED(pm); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if ((npv = pmap_find_pv(pg, pm, va)) == NULL) - return (0); - - /* - * There is at least one VA mapping this page. 
- */ - - if (clr_mask & (PVF_REF | PVF_MOD)) - pg->md.pvh_attrs |= set_mask & (PVF_REF | PVF_MOD); - - oflags = npv->pv_flags; - npv->pv_flags = flags = (oflags & ~clr_mask) | set_mask; - - if ((flags ^ oflags) & PVF_WIRED) { - if (flags & PVF_WIRED) - ++pm->pm_stats.wired_count; - else - --pm->pm_stats.wired_count; - } - - if ((flags ^ oflags) & PVF_WRITE) - pmap_fix_cache(pg, pm, 0); - - return (oflags); -} - -/* Function to set the debug level of the pmap code */ -#ifdef PMAP_DEBUG -void -pmap_debug(int level) -{ - pmap_debug_level = level; - dprintf("pmap_debug: level=%d\n", pmap_debug_level); -} -#endif /* PMAP_DEBUG */ - -void -pmap_pinit0(struct pmap *pmap) -{ - PDEBUG(1, printf("pmap_pinit0: pmap = %08x\n", (u_int32_t) pmap)); - - bcopy(kernel_pmap, pmap, sizeof(*pmap)); - bzero(&pmap->pm_mtx, sizeof(pmap->pm_mtx)); - PMAP_LOCK_INIT(pmap); -} - -/* - * Initialize a vm_page's machine-dependent fields. - */ -void -pmap_page_init(vm_page_t m) -{ - - TAILQ_INIT(&m->md.pv_list); - m->md.pv_memattr = VM_MEMATTR_DEFAULT; -} - -/* - * Initialize the pmap module. - * Called by vm_init, to initialize any structures that the pmap - * system needs to map virtual memory. - */ -void -pmap_init(void) -{ - int shpgperproc = PMAP_SHPGPERPROC; - - l2zone = uma_zcreate("L2 Table", L2_TABLE_SIZE_REAL, pmap_l2ptp_ctor, - NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - l2table_zone = uma_zcreate("L2 Table", sizeof(struct l2_dtable), NULL, - NULL, NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - - /* - * Initialize the PV entry allocator. - */ - pvzone = uma_zcreate("PV ENTRY", sizeof (struct pv_entry), NULL, NULL, - NULL, NULL, UMA_ALIGN_PTR, UMA_ZONE_VM | UMA_ZONE_NOFREE); - TUNABLE_INT_FETCH("vm.pmap.shpgperproc", &shpgperproc); - pv_entry_max = shpgperproc * maxproc + vm_cnt.v_page_count; - uma_zone_reserve_kva(pvzone, pv_entry_max); - pv_entry_high_water = 9 * (pv_entry_max / 10); - - /* - * Now it is safe to enable pv_table recording. 
- */ - PDEBUG(1, printf("pmap_init: done!\n")); -} - -int -pmap_fault_fixup(pmap_t pm, vm_offset_t va, vm_prot_t ftype, int user) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - int rv = 0; - - l1idx = L1_IDX(va); - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - - /* - * If there is no l2_dtable for this address, then the process - * has no business accessing it. - * - * Note: This will catch userland processes trying to access - * kernel addresses. - */ - l2 = pm->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL) - goto out; - - /* - * Likewise if there is no L2 descriptor table - */ - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - if (l2b->l2b_kva == NULL) - goto out; - - /* - * Check the PTE itself. - */ - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - if (pte == 0) - goto out; - - /* - * Catch a userland access to the vector page mapped at 0x0 - */ - if (user && (pte & L2_S_PROT_U) == 0) - goto out; - if (va == vector_page) - goto out; - - pa = l2pte_pa(pte); - - if ((ftype & VM_PROT_WRITE) && (pte & L2_S_PROT_W) == 0) { - /* - * This looks like a good candidate for "page modified" - * emulation... - */ - struct pv_entry *pv; - struct vm_page *pg; - - /* Extract the physical address of the page */ - if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) { - goto out; - } - /* Get the current flags for this page. */ - - pv = pmap_find_pv(pg, pm, va); - if (pv == NULL) { - goto out; - } - - /* - * Do the flags say this page is writable? If not then it - * is a genuine write fault. If yes then the write fault is - * our fault as we did not reflect the write access in the - * PTE. Now we know a write has occurred we can correct this - * and also set the modified bit - */ - if ((pv->pv_flags & PVF_WRITE) == 0) { - goto out; - } - - pg->md.pvh_attrs |= PVF_REF | PVF_MOD; - vm_page_dirty(pg); - pv->pv_flags |= PVF_REF | PVF_MOD; - - /* - * Re-enable write permissions for the page. 
No need to call - * pmap_fix_cache(), since this is just a - * modified-emulation fault, and the PVF_WRITE bit isn't - * changing. We've already set the cacheable bits based on - * the assumption that we can write to this page. - */ - *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO | L2_S_PROT_W; - PTE_SYNC(ptep); - rv = 1; - } else - if ((pte & L2_TYPE_MASK) == L2_TYPE_INV) { - /* - * This looks like a good candidate for "page referenced" - * emulation. - */ - struct pv_entry *pv; - struct vm_page *pg; - - /* Extract the physical address of the page */ - if ((pg = PHYS_TO_VM_PAGE(pa)) == NULL) - goto out; - /* Get the current flags for this page. */ - - pv = pmap_find_pv(pg, pm, va); - if (pv == NULL) - goto out; - - pg->md.pvh_attrs |= PVF_REF; - pv->pv_flags |= PVF_REF; - - - *ptep = (pte & ~L2_TYPE_MASK) | L2_S_PROTO; - PTE_SYNC(ptep); - rv = 1; - } - - /* - * We know there is a valid mapping here, so simply - * fix up the L1 if necessary. - */ - pl1pd = &pm->pm_l1->l1_kva[l1idx]; - l1pd = l2b->l2b_phys | L1_C_DOM(pm->pm_domain) | L1_C_PROTO; - if (*pl1pd != l1pd) { - *pl1pd = l1pd; - PTE_SYNC(pl1pd); - rv = 1; - } - -#ifdef DEBUG - /* - * If 'rv == 0' at this point, it generally indicates that there is a - * stale TLB entry for the faulting address. This happens when two or - * more processes are sharing an L1. Since we don't flush the TLB on - * a context switch between such processes, we can take domain faults - * for mappings which exist at the same VA in both processes. EVEN IF - * WE'VE RECENTLY FIXED UP THE CORRESPONDING L1 in pmap_enter(), for - * example. - * - * This is extremely likely to happen if pmap_enter() updated the L1 - * entry for a recently entered mapping. In this case, the TLB is - * flushed for the new mapping, but there may still be TLB entries for - * other mappings belonging to other processes in the 1MB range - * covered by the L1 entry. 
- * - * Since 'rv == 0', we know that the L1 already contains the correct - * value, so the fault must be due to a stale TLB entry. - * - * Since we always need to flush the TLB anyway in the case where we - * fixed up the L1, or frobbed the L2 PTE, we effectively deal with - * stale TLB entries dynamically. - * - * However, the above condition can ONLY happen if the current L1 is - * being shared. If it happens when the L1 is unshared, it indicates - * that other parts of the pmap are not doing their job WRT managing - * the TLB. - */ - if (rv == 0 && pm->pm_l1->l1_domain_use_count == 1) { - printf("fixup: pm %p, va 0x%lx, ftype %d - nothing to do!\n", - pm, (u_long)va, ftype); - printf("fixup: l2 %p, l2b %p, ptep %p, pl1pd %p\n", - l2, l2b, ptep, pl1pd); - printf("fixup: pte 0x%x, l1pd 0x%x, last code 0x%x\n", - pte, l1pd, last_fault_code); -#ifdef DDB - Debugger(); -#endif - } -#endif - - cpu_tlb_flushID_SE(va); - cpu_cpwait(); - - rv = 1; - -out: - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pm); - return (rv); -} - -void -pmap_postinit(void) -{ - struct l2_bucket *l2b; - struct l1_ttable *l1; - pd_entry_t *pl1pt; - pt_entry_t *ptep, pte; - vm_offset_t va, eva; - u_int loop, needed; - - needed = (maxproc / PMAP_DOMAINS) + ((maxproc % PMAP_DOMAINS) ? 
1 : 0); - needed -= 1; - l1 = malloc(sizeof(*l1) * needed, M_VMPMAP, M_WAITOK); - - for (loop = 0; loop < needed; loop++, l1++) { - /* Allocate a L1 page table */ - va = (vm_offset_t)contigmalloc(L1_TABLE_SIZE, M_VMPMAP, 0, 0x0, - 0xffffffff, L1_TABLE_SIZE, 0); - - if (va == 0) - panic("Cannot allocate L1 KVM"); - - eva = va + L1_TABLE_SIZE; - pl1pt = (pd_entry_t *)va; - - while (va < eva) { - l2b = pmap_get_l2_bucket(kernel_pmap, va); - ptep = &l2b->l2b_kva[l2pte_index(va)]; - pte = *ptep; - pte = (pte & ~L2_S_CACHE_MASK) | pte_l2_s_cache_mode_pt; - *ptep = pte; - PTE_SYNC(ptep); - cpu_tlb_flushD_SE(va); - - va += PAGE_SIZE; - } - pmap_init_l1(l1, pl1pt); - } - - -#ifdef DEBUG - printf("pmap_postinit: Allocated %d static L1 descriptor tables\n", - needed); -#endif -} - -/* - * This is used to stuff certain critical values into the PCB where they - * can be accessed quickly from cpu_switch() et al. - */ -void -pmap_set_pcb_pagedir(pmap_t pm, struct pcb *pcb) -{ - struct l2_bucket *l2b; - - pcb->pcb_pagedir = pm->pm_l1->l1_physaddr; - pcb->pcb_dacr = (DOMAIN_CLIENT << (PMAP_DOMAIN_KERNEL * 2)) | - (DOMAIN_CLIENT << (pm->pm_domain * 2)); - - if (vector_page < KERNBASE) { - pcb->pcb_pl1vec = &pm->pm_l1->l1_kva[L1_IDX(vector_page)]; - l2b = pmap_get_l2_bucket(pm, vector_page); - pcb->pcb_l1vec = l2b->l2b_phys | L1_C_PROTO | - L1_C_DOM(pm->pm_domain) | L1_C_DOM(PMAP_DOMAIN_KERNEL); - } else - pcb->pcb_pl1vec = NULL; -} - -void -pmap_activate(struct thread *td) -{ - pmap_t pm; - struct pcb *pcb; - - pm = vmspace_pmap(td->td_proc->p_vmspace); - pcb = td->td_pcb; - - critical_enter(); - pmap_set_pcb_pagedir(pm, pcb); - - if (td == curthread) { - u_int cur_dacr, cur_ttb; - - __asm __volatile("mrc p15, 0, %0, c2, c0, 0" : "=r"(cur_ttb)); - __asm __volatile("mrc p15, 0, %0, c3, c0, 0" : "=r"(cur_dacr)); - - cur_ttb &= ~(L1_TABLE_SIZE - 1); - - if (cur_ttb == (u_int)pcb->pcb_pagedir && - cur_dacr == pcb->pcb_dacr) { - /* - * No need to switch address spaces. 
- */ - critical_exit(); - return; - } - - - /* - * We MUST, I repeat, MUST fix up the L1 entry corresponding - * to 'vector_page' in the incoming L1 table before switching - * to it otherwise subsequent interrupts/exceptions (including - * domain faults!) will jump into hyperspace. - */ - if (pcb->pcb_pl1vec) { - - *pcb->pcb_pl1vec = pcb->pcb_l1vec; - /* - * Don't need to PTE_SYNC() at this point since - * cpu_setttb() is about to flush both the cache - * and the TLB. - */ - } - - cpu_domains(pcb->pcb_dacr); - cpu_setttb(pcb->pcb_pagedir); - } - critical_exit(); -} - -static int -pmap_set_pt_cache_mode(pd_entry_t *kl1, vm_offset_t va) -{ - pd_entry_t *pdep, pde; - pt_entry_t *ptep, pte; - vm_offset_t pa; - int rv = 0; - - /* - * Make sure the descriptor itself has the correct cache mode - */ - pdep = &kl1[L1_IDX(va)]; - pde = *pdep; - - if (l1pte_section_p(pde)) { - if ((pde & L1_S_CACHE_MASK) != pte_l1_s_cache_mode_pt) { - *pdep = (pde & ~L1_S_CACHE_MASK) | - pte_l1_s_cache_mode_pt; - PTE_SYNC(pdep); - cpu_dcache_wbinv_range((vm_offset_t)pdep, - sizeof(*pdep)); - cpu_l2cache_wbinv_range((vm_offset_t)pdep, - sizeof(*pdep)); - rv = 1; - } - } else { - pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); - ptep = (pt_entry_t *)kernel_pt_lookup(pa); - if (ptep == NULL) - panic("pmap_bootstrap: No L2 for L2 @ va %p\n", ptep); - - ptep = &ptep[l2pte_index(va)]; - pte = *ptep; - if ((pte & L2_S_CACHE_MASK) != pte_l2_s_cache_mode_pt) { - *ptep = (pte & ~L2_S_CACHE_MASK) | - pte_l2_s_cache_mode_pt; - PTE_SYNC(ptep); - cpu_dcache_wbinv_range((vm_offset_t)ptep, - sizeof(*ptep)); - cpu_l2cache_wbinv_range((vm_offset_t)ptep, - sizeof(*ptep)); - rv = 1; - } - } - - return (rv); -} - -static void -pmap_alloc_specials(vm_offset_t *availp, int pages, vm_offset_t *vap, - pt_entry_t **ptep) -{ - vm_offset_t va = *availp; - struct l2_bucket *l2b; - - if (ptep) { - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (l2b == NULL) - panic("pmap_alloc_specials: no l2b for 0x%x", va); - - *ptep = 
&l2b->l2b_kva[l2pte_index(va)]; - } - - *vap = va; - *availp = va + (PAGE_SIZE * pages); -} - -/* - * Bootstrap the system enough to run with virtual memory. - * - * On the arm this is called after mapping has already been enabled - * and just syncs the pmap module with what has already been done. - * [We can't call it easily with mapping off since the kernel is not - * mapped with PA == VA, hence we would have to relocate every address - * from the linked base (virtual) address "KERNBASE" to the actual - * (physical) address starting relative to 0] - */ -#define PMAP_STATIC_L2_SIZE 16 -void -pmap_bootstrap(vm_offset_t firstaddr, struct pv_addr *l1pt) -{ - static struct l1_ttable static_l1; - static struct l2_dtable static_l2[PMAP_STATIC_L2_SIZE]; - struct l1_ttable *l1 = &static_l1; - struct l2_dtable *l2; - struct l2_bucket *l2b; - pd_entry_t pde; - pd_entry_t *kernel_l1pt = (pd_entry_t *)l1pt->pv_va; - pt_entry_t *ptep; - pt_entry_t *qmap_pte; - vm_paddr_t pa; - vm_offset_t va; - vm_size_t size; - int l1idx, l2idx, l2next = 0; - - PDEBUG(1, printf("firstaddr = %08x, lastaddr = %08x\n", - firstaddr, vm_max_kernel_address)); - - virtual_avail = firstaddr; - kernel_pmap->pm_l1 = l1; - kernel_l1pa = l1pt->pv_pa; - - /* - * Scan the L1 translation table created by initarm() and create - * the required metadata for all valid mappings found in it. - */ - for (l1idx = 0; l1idx < (L1_TABLE_SIZE / sizeof(pd_entry_t)); l1idx++) { - pde = kernel_l1pt[l1idx]; - - /* - * We're only interested in Coarse mappings. - * pmap_extract() can deal with section mappings without - * recourse to checking L2 metadata. 
- */ - if ((pde & L1_TYPE_MASK) != L1_TYPE_C) - continue; - - /* - * Lookup the KVA of this L2 descriptor table - */ - pa = (vm_paddr_t)(pde & L1_C_ADDR_MASK); - ptep = (pt_entry_t *)kernel_pt_lookup(pa); - - if (ptep == NULL) { - panic("pmap_bootstrap: No L2 for va 0x%x, pa 0x%lx", - (u_int)l1idx << L1_S_SHIFT, (long unsigned int)pa); - } - - /* - * Fetch the associated L2 metadata structure. - * Allocate a new one if necessary. - */ - if ((l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]) == NULL) { - if (l2next == PMAP_STATIC_L2_SIZE) - panic("pmap_bootstrap: out of static L2s"); - kernel_pmap->pm_l2[L2_IDX(l1idx)] = l2 = - &static_l2[l2next++]; - } - - /* - * One more L1 slot tracked... - */ - l2->l2_occupancy++; - - /* - * Fill in the details of the L2 descriptor in the - * appropriate bucket. - */ - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - l2b->l2b_kva = ptep; - l2b->l2b_phys = pa; - l2b->l2b_l1idx = l1idx; - - /* - * Establish an initial occupancy count for this descriptor - */ - for (l2idx = 0; - l2idx < (L2_TABLE_SIZE_REAL / sizeof(pt_entry_t)); - l2idx++) { - if ((ptep[l2idx] & L2_TYPE_MASK) != L2_TYPE_INV) { - l2b->l2b_occupancy++; - } - } - - /* - * Make sure the descriptor itself has the correct cache mode. - * If not, fix it, but whine about the problem. Port-meisters - * should consider this a clue to fix up their initarm() - * function. :) - */ - if (pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)ptep)) { - printf("pmap_bootstrap: WARNING! wrong cache mode for " - "L2 pte @ %p\n", ptep); - } - } - - - /* - * Ensure the primary (kernel) L1 has the correct cache mode for - * a page table. Bitch if it is not correctly set. - */ - for (va = (vm_offset_t)kernel_l1pt; - va < ((vm_offset_t)kernel_l1pt + L1_TABLE_SIZE); va += PAGE_SIZE) { - if (pmap_set_pt_cache_mode(kernel_l1pt, va)) - printf("pmap_bootstrap: WARNING! 
wrong cache mode for " - "primary L1 @ 0x%x\n", va); - } - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - cpu_tlb_flushID(); - cpu_cpwait(); - - PMAP_LOCK_INIT(kernel_pmap); - CPU_FILL(&kernel_pmap->pm_active); - kernel_pmap->pm_domain = PMAP_DOMAIN_KERNEL; - TAILQ_INIT(&kernel_pmap->pm_pvlist); - - /* - * Initialize the global pv list lock. - */ - rw_init_flags(&pvh_global_lock, "pmap pv global", RW_RECURSE); - - /* - * Reserve some special page table entries/VA space for temporary - * mapping of pages. - */ - pmap_alloc_specials(&virtual_avail, 1, &csrcp, &csrc_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)csrc_pte); - pmap_alloc_specials(&virtual_avail, 1, &cdstp, &cdst_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)cdst_pte); - pmap_alloc_specials(&virtual_avail, 1, &qmap_addr, &qmap_pte); - pmap_set_pt_cache_mode(kernel_l1pt, (vm_offset_t)qmap_pte); - size = ((vm_max_kernel_address - pmap_curmaxkvaddr) + L1_S_OFFSET) / - L1_S_SIZE; - pmap_alloc_specials(&virtual_avail, - round_page(size * L2_TABLE_SIZE_REAL) / PAGE_SIZE, - &pmap_kernel_l2ptp_kva, NULL); - - size = (size + (L2_BUCKET_SIZE - 1)) / L2_BUCKET_SIZE; - pmap_alloc_specials(&virtual_avail, - round_page(size * sizeof(struct l2_dtable)) / PAGE_SIZE, - &pmap_kernel_l2dtable_kva, NULL); - - pmap_alloc_specials(&virtual_avail, - 1, (vm_offset_t*)&_tmppt, NULL); - pmap_alloc_specials(&virtual_avail, - MAXDUMPPGS, (vm_offset_t *)&crashdumpmap, NULL); - SLIST_INIT(&l1_list); - TAILQ_INIT(&l1_lru_list); - mtx_init(&l1_lru_lock, "l1 list lock", NULL, MTX_DEF); - pmap_init_l1(l1, kernel_l1pt); - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - - virtual_avail = round_page(virtual_avail); - virtual_end = vm_max_kernel_address; - kernel_vm_end = pmap_curmaxkvaddr; - mtx_init(&cmtx, "TMP mappings mtx", NULL, MTX_DEF); - mtx_init(&qmap_mtx, "quick mapping mtx", NULL, MTX_DEF); - - pmap_set_pcb_pagedir(kernel_pmap, thread0.td_pcb); -} - 
-/*************************************************** - * Pmap allocation/deallocation routines. - ***************************************************/ - -/* - * Release any resources held by the given physical map. - * Called when a pmap initialized by pmap_pinit is being released. - * Should only be called if the map contains no valid mappings. - */ -void -pmap_release(pmap_t pmap) -{ - struct pcb *pcb; - - pmap_idcache_wbinv_all(pmap); - cpu_l2cache_wbinv_all(); - pmap_tlb_flushID(pmap); - cpu_cpwait(); - if (vector_page < KERNBASE) { - struct pcb *curpcb = PCPU_GET(curpcb); - pcb = thread0.td_pcb; - if (pmap_is_current(pmap)) { - /* - * Frob the L1 entry corresponding to the vector - * page so that it contains the kernel pmap's domain - * number. This will ensure pmap_remove() does not - * pull the current vector page out from under us. - */ - critical_enter(); - *pcb->pcb_pl1vec = pcb->pcb_l1vec; - cpu_domains(pcb->pcb_dacr); - cpu_setttb(pcb->pcb_pagedir); - critical_exit(); - } - pmap_remove(pmap, vector_page, vector_page + PAGE_SIZE); - /* - * Make sure cpu_switch(), et al, DTRT. This is safe to do - * since this process has no remaining mappings of its own. 
- */ - curpcb->pcb_pl1vec = pcb->pcb_pl1vec; - curpcb->pcb_l1vec = pcb->pcb_l1vec; - curpcb->pcb_dacr = pcb->pcb_dacr; - curpcb->pcb_pagedir = pcb->pcb_pagedir; - - } - pmap_free_l1(pmap); - - dprintf("pmap_release()\n"); -} - - - -/* - * Helper function for pmap_grow_l2_bucket() - */ -static __inline int -pmap_grow_map(vm_offset_t va, pt_entry_t cache_mode, vm_paddr_t *pap) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep; - vm_paddr_t pa; - struct vm_page *pg; - - pg = vm_page_alloc(NULL, 0, VM_ALLOC_NOOBJ | VM_ALLOC_WIRED); - if (pg == NULL) - return (1); - pa = VM_PAGE_TO_PHYS(pg); - - if (pap) - *pap = pa; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - - ptep = &l2b->l2b_kva[l2pte_index(va)]; - *ptep = L2_S_PROTO | pa | cache_mode | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); - PTE_SYNC(ptep); - return (0); -} - -/* - * This is the same as pmap_alloc_l2_bucket(), except that it is only - * used by pmap_growkernel(). - */ -static __inline struct l2_bucket * -pmap_grow_l2_bucket(pmap_t pm, vm_offset_t va) -{ - struct l2_dtable *l2; - struct l2_bucket *l2b; - struct l1_ttable *l1; - pd_entry_t *pl1pd; - u_short l1idx; - vm_offset_t nva; - - l1idx = L1_IDX(va); - - if ((l2 = pm->pm_l2[L2_IDX(l1idx)]) == NULL) { - /* - * No mapping at this address, as there is - * no entry in the L1 table. - * Need to allocate a new l2_dtable. - */ - nva = pmap_kernel_l2dtable_kva; - if ((nva & PAGE_MASK) == 0) { - /* - * Need to allocate a backing page - */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) - return (NULL); - } - - l2 = (struct l2_dtable *)nva; - nva += sizeof(struct l2_dtable); - - if ((nva & PAGE_MASK) < (pmap_kernel_l2dtable_kva & - PAGE_MASK)) { - /* - * The new l2_dtable straddles a page boundary. - * Map in another page to cover it. 
- */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode, NULL)) - return (NULL); - } - - pmap_kernel_l2dtable_kva = nva; - - /* - * Link it into the parent pmap - */ - pm->pm_l2[L2_IDX(l1idx)] = l2; - memset(l2, 0, sizeof(*l2)); - } - - l2b = &l2->l2_bucket[L2_BUCKET(l1idx)]; - - /* - * Fetch pointer to the L2 page table associated with the address. - */ - if (l2b->l2b_kva == NULL) { - pt_entry_t *ptep; - - /* - * No L2 page table has been allocated. Chances are, this - * is because we just allocated the l2_dtable, above. - */ - nva = pmap_kernel_l2ptp_kva; - ptep = (pt_entry_t *)nva; - if ((nva & PAGE_MASK) == 0) { - /* - * Need to allocate a backing page - */ - if (pmap_grow_map(nva, pte_l2_s_cache_mode_pt, - &pmap_kernel_l2ptp_phys)) - return (NULL); - PTE_SYNC_RANGE(ptep, PAGE_SIZE / sizeof(pt_entry_t)); - } - memset(ptep, 0, L2_TABLE_SIZE_REAL); - l2->l2_occupancy++; - l2b->l2b_kva = ptep; - l2b->l2b_l1idx = l1idx; - l2b->l2b_phys = pmap_kernel_l2ptp_phys; - - pmap_kernel_l2ptp_kva += L2_TABLE_SIZE_REAL; - pmap_kernel_l2ptp_phys += L2_TABLE_SIZE_REAL; - } - - /* Distribute new L1 entry to all other L1s */ - SLIST_FOREACH(l1, &l1_list, l1_link) { - pl1pd = &l1->l1_kva[L1_IDX(va)]; - *pl1pd = l2b->l2b_phys | L1_C_DOM(PMAP_DOMAIN_KERNEL) | - L1_C_PROTO; - PTE_SYNC(pl1pd); - } - - return (l2b); -} - - -/* - * grow the number of kernel page table entries, if needed - */ -void -pmap_growkernel(vm_offset_t addr) -{ - pmap_t kpm = kernel_pmap; - - if (addr <= pmap_curmaxkvaddr) - return; /* we are OK */ - - /* - * whoops! 
we need to add kernel PTPs - */ - - /* Map 1MB at a time */ - for (; pmap_curmaxkvaddr < addr; pmap_curmaxkvaddr += L1_S_SIZE) - pmap_grow_l2_bucket(kpm, pmap_curmaxkvaddr); - - /* - * flush out the cache, expensive but growkernel will happen so - * rarely - */ - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - cpu_tlb_flushD(); - cpu_cpwait(); - kernel_vm_end = pmap_curmaxkvaddr; -} - - -/* - * Remove all pages from specified address space - * this aids process exit speeds. Also, this code - * is special cased for current process only, but - * can have the more generic (and slightly slower) - * mode enabled. This is much faster than pmap_remove - * in the case of running down an entire address space. - */ -void -pmap_remove_pages(pmap_t pmap) -{ - struct pv_entry *pv, *npv; - struct l2_bucket *l2b = NULL; - vm_page_t m; - pt_entry_t *pt; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - for (pv = TAILQ_FIRST(&pmap->pm_pvlist); pv; pv = npv) { - if (pv->pv_flags & PVF_WIRED || pv->pv_flags & PVF_UNMAN) { - /* Cannot remove wired or unmanaged pages now. */ - npv = TAILQ_NEXT(pv, pv_plist); - continue; - } - pmap->pm_stats.resident_count--; - l2b = pmap_get_l2_bucket(pmap, pv->pv_va); - KASSERT(l2b != NULL, ("No L2 bucket in pmap_remove_pages")); - pt = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - m = PHYS_TO_VM_PAGE(*pt & L2_S_FRAME); - KASSERT((vm_offset_t)m >= KERNBASE, ("Trying to access non-existent page va %x pte %x", pv->pv_va, *pt)); - *pt = 0; - PTE_SYNC(pt); - npv = TAILQ_NEXT(pv, pv_plist); - pmap_nuke_pv(m, pmap, pv); - if (TAILQ_EMPTY(&m->md.pv_list)) - vm_page_aflag_clear(m, PGA_WRITEABLE); - pmap_free_pv_entry(pv); - pmap_free_l2_bucket(pmap, l2b, 1); - } - rw_wunlock(&pvh_global_lock); - cpu_tlb_flushID(); - cpu_cpwait(); - PMAP_UNLOCK(pmap); -} - - -/*************************************************** - * Low level mapping routines..... 
- ***************************************************/ - -#ifdef ARM_HAVE_SUPERSECTIONS -/* Map a super section into the KVA. */ - -void -pmap_kenter_supersection(vm_offset_t va, uint64_t pa, int flags) -{ - pd_entry_t pd = L1_S_PROTO | L1_S_SUPERSEC | (pa & L1_SUP_FRAME) | - (((pa >> 32) & 0xf) << 20) | L1_S_PROT(PTE_KERNEL, - VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); - struct l1_ttable *l1; - vm_offset_t va0, va_end; - - KASSERT(((va | pa) & L1_SUP_OFFSET) == 0, - ("Not a valid super section mapping")); - if (flags & SECTION_CACHE) - pd |= pte_l1_s_cache_mode; - else if (flags & SECTION_PT) - pd |= pte_l1_s_cache_mode_pt; - va0 = va & L1_SUP_FRAME; - va_end = va + L1_SUP_SIZE; - SLIST_FOREACH(l1, &l1_list, l1_link) { - va = va0; - for (; va < va_end; va += L1_S_SIZE) { - l1->l1_kva[L1_IDX(va)] = pd; - PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); - } - } -} -#endif - -/* Map a section into the KVA. */ - -void -pmap_kenter_section(vm_offset_t va, vm_offset_t pa, int flags) -{ - pd_entry_t pd = L1_S_PROTO | pa | L1_S_PROT(PTE_KERNEL, - VM_PROT_READ|VM_PROT_WRITE) | L1_S_DOM(PMAP_DOMAIN_KERNEL); - struct l1_ttable *l1; - - KASSERT(((va | pa) & L1_S_OFFSET) == 0, - ("Not a valid section mapping")); - if (flags & SECTION_CACHE) - pd |= pte_l1_s_cache_mode; - else if (flags & SECTION_PT) - pd |= pte_l1_s_cache_mode_pt; - SLIST_FOREACH(l1, &l1_list, l1_link) { - l1->l1_kva[L1_IDX(va)] = pd; - PTE_SYNC(&l1->l1_kva[L1_IDX(va)]); - } -} - -/* - * Make a temporary mapping for a physical address. This is only intended - * to be used for panic dumps. - */ -void * -pmap_kenter_temporary(vm_paddr_t pa, int i) -{ - vm_offset_t va; - - va = (vm_offset_t)crashdumpmap + (i * PAGE_SIZE); - pmap_kenter(va, pa); - return ((void *)crashdumpmap); -} - -/* - * add a wired page to the kva - * note that in order for the mapping to take effect -- you - * should do a invltlb after doing the pmap_kenter... 
- */ -static PMAP_INLINE void -pmap_kenter_internal(vm_offset_t va, vm_offset_t pa, int flags) -{ - struct l2_bucket *l2b; - pt_entry_t *pte; - pt_entry_t opte; - struct pv_entry *pve; - vm_page_t m; - - PDEBUG(1, printf("pmap_kenter: va = %08x, pa = %08x\n", - (uint32_t) va, (uint32_t) pa)); - - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (l2b == NULL) - l2b = pmap_grow_l2_bucket(kernel_pmap, va); - KASSERT(l2b != NULL, ("No L2 Bucket")); - pte = &l2b->l2b_kva[l2pte_index(va)]; - opte = *pte; - PDEBUG(1, printf("pmap_kenter: pte = %08x, opte = %08x, npte = %08x\n", - (uint32_t) pte, opte, *pte)); - if (l2pte_valid(opte)) { - pmap_kremove(va); - } else { - if (opte == 0) - l2b->l2b_occupancy++; - } - *pte = L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, - VM_PROT_READ | VM_PROT_WRITE); - if (flags & KENTER_CACHE) - *pte |= pte_l2_s_cache_mode; - if (flags & KENTER_USER) - *pte |= L2_S_PROT_U; - PTE_SYNC(pte); - - /* - * A kernel mapping may not be the page's only mapping, so create a PV - * entry to ensure proper caching. - * - * The existence test for the pvzone is used to delay the recording of - * kernel mappings until the VM system is fully initialized. - * - * This expects the physical memory to have a vm_page_array entry. 
- */ - if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) != NULL) { - rw_wlock(&pvh_global_lock); - if (!TAILQ_EMPTY(&m->md.pv_list) || m->md.pv_kva != 0) { - if ((pve = pmap_get_pv_entry()) == NULL) - panic("pmap_kenter_internal: no pv entries"); - PMAP_LOCK(kernel_pmap); - pmap_enter_pv(m, pve, kernel_pmap, va, - PVF_WRITE | PVF_UNMAN); - pmap_fix_cache(m, kernel_pmap, va); - PMAP_UNLOCK(kernel_pmap); - } else { - m->md.pv_kva = va; - } - rw_wunlock(&pvh_global_lock); - } -} - -void -pmap_kenter(vm_offset_t va, vm_paddr_t pa) -{ - pmap_kenter_internal(va, pa, KENTER_CACHE); -} - -void -pmap_kenter_nocache(vm_offset_t va, vm_paddr_t pa) -{ - - pmap_kenter_internal(va, pa, 0); -} - -void -pmap_kenter_device(vm_offset_t va, vm_size_t size, vm_paddr_t pa) -{ - vm_offset_t sva; - - KASSERT((size & PAGE_MASK) == 0, - ("%s: device mapping not page-sized", __func__)); - - sva = va; - while (size != 0) { - pmap_kenter_internal(va, pa, 0); - va += PAGE_SIZE; - pa += PAGE_SIZE; - size -= PAGE_SIZE; - } -} - -void -pmap_kremove_device(vm_offset_t va, vm_size_t size) -{ - vm_offset_t sva; - - KASSERT((size & PAGE_MASK) == 0, - ("%s: device mapping not page-sized", __func__)); - - sva = va; - while (size != 0) { - pmap_kremove(va); - va += PAGE_SIZE; - size -= PAGE_SIZE; - } -} - -void -pmap_kenter_user(vm_offset_t va, vm_paddr_t pa) -{ - - pmap_kenter_internal(va, pa, KENTER_CACHE|KENTER_USER); - /* - * Call pmap_fault_fixup now, to make sure we'll have no exception - * at the first use of the new address, or bad things will happen, - * as we use one of these addresses in the exception handlers. 
- */ - pmap_fault_fixup(kernel_pmap, va, VM_PROT_READ|VM_PROT_WRITE, 1); -} - -vm_paddr_t -pmap_kextract(vm_offset_t va) -{ - - return (pmap_extract_locked(kernel_pmap, va)); -} - -/* - * remove a page from the kernel pagetables - */ -void -pmap_kremove(vm_offset_t va) -{ - struct l2_bucket *l2b; - pt_entry_t *pte, opte; - struct pv_entry *pve; - vm_page_t m; - vm_offset_t pa; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - if (!l2b) - return; - KASSERT(l2b != NULL, ("No L2 Bucket")); - pte = &l2b->l2b_kva[l2pte_index(va)]; - opte = *pte; - if (l2pte_valid(opte)) { - /* pa = vtophs(va) taken from pmap_extract() */ - if ((opte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (opte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (opte & L2_S_FRAME) | (va & L2_S_OFFSET); - /* note: should never have to remove an allocation - * before the pvzone is initialized. - */ - rw_wlock(&pvh_global_lock); - PMAP_LOCK(kernel_pmap); - if (pvzone != NULL && (m = vm_phys_paddr_to_vm_page(pa)) && - (pve = pmap_remove_pv(m, kernel_pmap, va))) - pmap_free_pv_entry(pve); - PMAP_UNLOCK(kernel_pmap); - rw_wunlock(&pvh_global_lock); - va = va & ~PAGE_MASK; - cpu_dcache_wbinv_range(va, PAGE_SIZE); - cpu_l2cache_wbinv_range(va, PAGE_SIZE); - cpu_tlb_flushD_SE(va); - cpu_cpwait(); - *pte = 0; - } -} - - -/* - * Used to map a range of physical addresses into kernel - * virtual address space. - * - * The value passed in '*virt' is a suggested virtual address for - * the mapping. Architectures which can support a direct-mapped - * physical to virtual region can return the appropriate address - * within that region, leaving '*virt' unchanged. Other - * architectures should map the pages starting at '*virt' and - * update '*virt' with the first usable address after the mapped - * region. 
- */ -vm_offset_t -pmap_map(vm_offset_t *virt, vm_offset_t start, vm_offset_t end, int prot) -{ - vm_offset_t sva = *virt; - vm_offset_t va = sva; - - PDEBUG(1, printf("pmap_map: virt = %08x, start = %08x, end = %08x, " - "prot = %d\n", (uint32_t) *virt, (uint32_t) start, (uint32_t) end, - prot)); - - while (start < end) { - pmap_kenter(va, start); - va += PAGE_SIZE; - start += PAGE_SIZE; - } - *virt = va; - return (sva); -} - -static void -pmap_wb_page(vm_page_t m) -{ - struct pv_entry *pv; - - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, FALSE, - (pv->pv_flags & PVF_WRITE) == 0); -} - -static void -pmap_inv_page(vm_page_t m) -{ - struct pv_entry *pv; - - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - pmap_dcache_wb_range(pv->pv_pmap, pv->pv_va, PAGE_SIZE, TRUE, TRUE); -} -/* - * Add a list of wired pages to the kva - * this routine is only used for temporary - * kernel mappings that do not need to have - * page modification or references recorded. - * Note that old mappings are simply written - * over. The page *must* be wired. - */ -void -pmap_qenter(vm_offset_t va, vm_page_t *m, int count) -{ - int i; - - for (i = 0; i < count; i++) { - pmap_wb_page(m[i]); - pmap_kenter_internal(va, VM_PAGE_TO_PHYS(m[i]), - KENTER_CACHE); - va += PAGE_SIZE; - } -} - - -/* - * this routine jerks page mappings from the - * kernel -- it is meant only for temporary mappings. - */ -void -pmap_qremove(vm_offset_t va, int count) -{ - vm_paddr_t pa; - int i; - - for (i = 0; i < count; i++) { - pa = vtophys(va); - if (pa) { - pmap_inv_page(PHYS_TO_VM_PAGE(pa)); - pmap_kremove(va); - } - va += PAGE_SIZE; - } -} - - -/* - * pmap_object_init_pt preloads the ptes for a given object - * into the specified pmap. This eliminates the blast of soft - * faults on process startup and immediately after an mmap. 
- */ -void -pmap_object_init_pt(pmap_t pmap, vm_offset_t addr, vm_object_t object, - vm_pindex_t pindex, vm_size_t size) -{ - - VM_OBJECT_ASSERT_WLOCKED(object); - KASSERT(object->type == OBJT_DEVICE || object->type == OBJT_SG, - ("pmap_object_init_pt: non-device object")); -} - - -/* - * pmap_is_prefaultable: - * - * Return whether or not the specified virtual address is elgible - * for prefault. - */ -boolean_t -pmap_is_prefaultable(pmap_t pmap, vm_offset_t addr) -{ - pd_entry_t *pde; - pt_entry_t *pte; - - if (!pmap_get_pde_pte(pmap, addr, &pde, &pte)) - return (FALSE); - KASSERT(pte != NULL, ("Valid mapping but no pte ?")); - if (*pte == 0) - return (TRUE); - return (FALSE); -} - -/* - * Fetch pointers to the PDE/PTE for the given pmap/VA pair. - * Returns TRUE if the mapping exists, else FALSE. - * - * NOTE: This function is only used by a couple of arm-specific modules. - * It is not safe to take any pmap locks here, since we could be right - * in the middle of debugging the pmap anyway... - * - * It is possible for this routine to return FALSE even though a valid - * mapping does exist. This is because we don't lock, so the metadata - * state may be inconsistent. - * - * NOTE: We can return a NULL *ptp in the case where the L1 pde is - * a "section" mapping. 
- */ -boolean_t -pmap_get_pde_pte(pmap_t pm, vm_offset_t va, pd_entry_t **pdp, pt_entry_t **ptp) -{ - struct l2_dtable *l2; - pd_entry_t *pl1pd, l1pd; - pt_entry_t *ptep; - u_short l1idx; - - if (pm->pm_l1 == NULL) - return (FALSE); - - l1idx = L1_IDX(va); - *pdp = pl1pd = &pm->pm_l1->l1_kva[l1idx]; - l1pd = *pl1pd; - - if (l1pte_section_p(l1pd)) { - *ptp = NULL; - return (TRUE); - } - - if (pm->pm_l2 == NULL) - return (FALSE); - - l2 = pm->pm_l2[L2_IDX(l1idx)]; - - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - return (FALSE); - } - - *ptp = &ptep[l2pte_index(va)]; - return (TRUE); -} - -/* - * Routine: pmap_remove_all - * Function: - * Removes this physical page from - * all physical maps in which it resides. - * Reflects back modify bits to the pager. - * - * Notes: - * Original versions of this routine were very - * inefficient because they iteratively called - * pmap_remove (slow...) - */ -void -pmap_remove_all(vm_page_t m) -{ - pv_entry_t pv; - pt_entry_t *ptep; - struct l2_bucket *l2b; - boolean_t flush = FALSE; - pmap_t curpm; - int flags = 0; - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_remove_all: page %p is not managed", m)); - if (TAILQ_EMPTY(&m->md.pv_list)) - return; - rw_wlock(&pvh_global_lock); - - /* - * XXX This call shouldn't exist. Iterating over the PV list twice, - * once in pmap_clearbit() and again below, is both unnecessary and - * inefficient. The below code should itself write back the cache - * entry before it destroys the mapping. 
- */ - pmap_clearbit(m, PVF_WRITE); - curpm = vmspace_pmap(curproc->p_vmspace); - while ((pv = TAILQ_FIRST(&m->md.pv_list)) != NULL) { - if (flush == FALSE && (pv->pv_pmap == curpm || - pv->pv_pmap == kernel_pmap)) - flush = TRUE; - - PMAP_LOCK(pv->pv_pmap); - /* - * Cached contents were written-back in pmap_clearbit(), - * but we still have to invalidate the cache entry to make - * sure stale data are not retrieved when another page will be - * mapped under this virtual address. - */ - if (pmap_is_current(pv->pv_pmap)) { - cpu_dcache_inv_range(pv->pv_va, PAGE_SIZE); - if (pmap_has_valid_mapping(pv->pv_pmap, pv->pv_va)) - cpu_l2cache_inv_range(pv->pv_va, PAGE_SIZE); - } - - if (pv->pv_flags & PVF_UNMAN) { - /* remove the pv entry, but do not remove the mapping - * and remember this is a kernel mapped page - */ - m->md.pv_kva = pv->pv_va; - } else { - /* remove the mapping and pv entry */ - l2b = pmap_get_l2_bucket(pv->pv_pmap, pv->pv_va); - KASSERT(l2b != NULL, ("No l2 bucket")); - ptep = &l2b->l2b_kva[l2pte_index(pv->pv_va)]; - *ptep = 0; - PTE_SYNC_CURRENT(pv->pv_pmap, ptep); - pmap_free_l2_bucket(pv->pv_pmap, l2b, 1); - pv->pv_pmap->pm_stats.resident_count--; - flags |= pv->pv_flags; - } - pmap_nuke_pv(m, pv->pv_pmap, pv); - PMAP_UNLOCK(pv->pv_pmap); - pmap_free_pv_entry(pv); - } - - if (flush) { - if (PV_BEEN_EXECD(flags)) - pmap_tlb_flushID(curpm); - else - pmap_tlb_flushD(curpm); - } - vm_page_aflag_clear(m, PGA_WRITEABLE); - rw_wunlock(&pvh_global_lock); -} - - -/* - * Set the physical protection on the - * specified range of this map as requested. 
- */ -void -pmap_protect(pmap_t pm, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - vm_offset_t next_bucket; - u_int flags; - int flush; - - CTR4(KTR_PMAP, "pmap_protect: pmap %p sva 0x%08x eva 0x%08x prot %x", - pm, sva, eva, prot); - - if ((prot & VM_PROT_READ) == 0) { - pmap_remove(pm, sva, eva); - return; - } - - if (prot & VM_PROT_WRITE) { - /* - * If this is a read->write transition, just ignore it and let - * vm_fault() take care of it later. - */ - return; - } - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - - /* - * OK, at this point, we know we're doing write-protect operation. - * If the pmap is active, write-back the range. - */ - pmap_dcache_wb_range(pm, sva, eva - sva, FALSE, FALSE); - - flush = ((eva - sva) >= (PAGE_SIZE * 4)) ? 0 : -1; - flags = 0; - - while (sva < eva) { - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(pm, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - - ptep = &l2b->l2b_kva[l2pte_index(sva)]; - - while (sva < next_bucket) { - if ((pte = *ptep) != 0 && (pte & L2_S_PROT_W) != 0) { - struct vm_page *pg; - u_int f; - - pg = PHYS_TO_VM_PAGE(l2pte_pa(pte)); - pte &= ~L2_S_PROT_W; - *ptep = pte; - PTE_SYNC(ptep); - - if (!(pg->oflags & VPO_UNMANAGED)) { - f = pmap_modify_pv(pg, pm, sva, - PVF_WRITE, 0); - if (f & PVF_WRITE) - vm_page_dirty(pg); - } else - f = 0; - - if (flush >= 0) { - flush++; - flags |= f; - } else - if (PV_BEEN_EXECD(f)) - pmap_tlb_flushID_SE(pm, sva); - else - if (PV_BEEN_REFD(f)) - pmap_tlb_flushD_SE(pm, sva); - } - - sva += PAGE_SIZE; - ptep++; - } - } - - - if (flush) { - if (PV_BEEN_EXECD(flags)) - pmap_tlb_flushID(pm); - else - if (PV_BEEN_REFD(flags)) - pmap_tlb_flushD(pm); - } - rw_wunlock(&pvh_global_lock); - - PMAP_UNLOCK(pm); -} - - -/* - * Insert the given physical page (p) at - * the specified virtual address (v) in the - * target physical map with the protection 
requested. - * - * If specified, the page will be wired down, meaning - * that the related pte can not be reclaimed. - * - * NB: This is the only routine which MAY NOT lazy-evaluate - * or lose information. That is, this routine must actually - * insert this page into the given map NOW. - */ - -int -pmap_enter(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - u_int flags, int8_t psind __unused) -{ - int rv; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - rv = pmap_enter_locked(pmap, va, m, prot, flags); - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); - return (rv); -} - -/* - * The pvh global and pmap locks must be held. - */ -static int -pmap_enter_locked(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot, - u_int flags) -{ - struct l2_bucket *l2b = NULL; - struct vm_page *opg; - struct pv_entry *pve = NULL; - pt_entry_t *ptep, npte, opte; - u_int nflags; - u_int oflags; - vm_paddr_t pa; - - PMAP_ASSERT_LOCKED(pmap); - rw_assert(&pvh_global_lock, RA_WLOCKED); - if (va == vector_page) { - pa = systempage.pv_pa; - m = NULL; - } else { - if ((m->oflags & VPO_UNMANAGED) == 0 && !vm_page_xbusied(m)) - VM_OBJECT_ASSERT_LOCKED(m->object); - pa = VM_PAGE_TO_PHYS(m); - } - nflags = 0; - if (prot & VM_PROT_WRITE) - nflags |= PVF_WRITE; - if (prot & VM_PROT_EXECUTE) - nflags |= PVF_EXEC; - if ((flags & PMAP_ENTER_WIRED) != 0) - nflags |= PVF_WIRED; - PDEBUG(1, printf("pmap_enter: pmap = %08x, va = %08x, m = %08x, prot = %x, " - "flags = %x\n", (uint32_t) pmap, va, (uint32_t) m, prot, flags)); - - if (pmap == kernel_pmap) { - l2b = pmap_get_l2_bucket(pmap, va); - if (l2b == NULL) - l2b = pmap_grow_l2_bucket(pmap, va); - } else { -do_l2b_alloc: - l2b = pmap_alloc_l2_bucket(pmap, va); - if (l2b == NULL) { - if ((flags & PMAP_ENTER_NOSLEEP) == 0) { - PMAP_UNLOCK(pmap); - rw_wunlock(&pvh_global_lock); - VM_WAIT; - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - goto do_l2b_alloc; - } - return (KERN_RESOURCE_SHORTAGE); - } - } - - ptep = 
&l2b->l2b_kva[l2pte_index(va)]; - - opte = *ptep; - npte = pa; - oflags = 0; - if (opte) { - /* - * There is already a mapping at this address. - * If the physical address is different, lookup the - * vm_page. - */ - if (l2pte_pa(opte) != pa) - opg = PHYS_TO_VM_PAGE(l2pte_pa(opte)); - else - opg = m; - } else - opg = NULL; - - if ((prot & (VM_PROT_ALL)) || - (!m || m->md.pvh_attrs & PVF_REF)) { - /* - * - The access type indicates that we don't need - * to do referenced emulation. - * OR - * - The physical page has already been referenced - * so no need to re-do referenced emulation here. - */ - npte |= L2_S_PROTO; - - nflags |= PVF_REF; - - if (m && ((prot & VM_PROT_WRITE) != 0 || - (m->md.pvh_attrs & PVF_MOD))) { - /* - * This is a writable mapping, and the - * page's mod state indicates it has - * already been modified. Make it - * writable from the outset. - */ - nflags |= PVF_MOD; - if (!(m->md.pvh_attrs & PVF_MOD)) - vm_page_dirty(m); - } - if (m && opte) - vm_page_aflag_set(m, PGA_REFERENCED); - } else { - /* - * Need to do page referenced emulation. - */ - npte |= L2_TYPE_INV; - } - - if (prot & VM_PROT_WRITE) { - npte |= L2_S_PROT_W; - if (m != NULL && - (m->oflags & VPO_UNMANAGED) == 0) - vm_page_aflag_set(m, PGA_WRITEABLE); - } - if (m->md.pv_memattr != VM_MEMATTR_UNCACHEABLE) - npte |= pte_l2_s_cache_mode; - if (m && m == opg) { - /* - * We're changing the attrs of an existing mapping. - */ - oflags = pmap_modify_pv(m, pmap, va, - PVF_WRITE | PVF_EXEC | PVF_WIRED | - PVF_MOD | PVF_REF, nflags); - - /* - * We may need to flush the cache if we're - * doing rw-ro... - */ - if (pmap_is_current(pmap) && - (oflags & PVF_NC) == 0 && - (opte & L2_S_PROT_W) != 0 && - (prot & VM_PROT_WRITE) == 0 && - (opte & L2_TYPE_MASK) != L2_TYPE_INV) { - cpu_dcache_wb_range(va, PAGE_SIZE); - cpu_l2cache_wb_range(va, PAGE_SIZE); - } - } else { - /* - * New mapping, or changing the backing page - * of an existing mapping. 
- */ - if (opg) { - /* - * Replacing an existing mapping with a new one. - * It is part of our managed memory so we - * must remove it from the PV list - */ - if ((pve = pmap_remove_pv(opg, pmap, va))) { - - /* note for patch: the oflags/invalidation was moved - * because PG_FICTITIOUS pages could free the pve - */ - oflags = pve->pv_flags; - /* - * If the old mapping was valid (ref/mod - * emulation creates 'invalid' mappings - * initially) then make sure to frob - * the cache. - */ - if ((oflags & PVF_NC) == 0 && l2pte_valid(opte)) { - if (PV_BEEN_EXECD(oflags)) { - pmap_idcache_wbinv_range(pmap, va, - PAGE_SIZE); - } else - if (PV_BEEN_REFD(oflags)) { - pmap_dcache_wb_range(pmap, va, - PAGE_SIZE, TRUE, - (oflags & PVF_WRITE) == 0); - } - } - - /* free/allocate a pv_entry for UNMANAGED pages if - * this physical page is not/is already mapped. - */ - - if (m && (m->oflags & VPO_UNMANAGED) && - !m->md.pv_kva && - TAILQ_EMPTY(&m->md.pv_list)) { - pmap_free_pv_entry(pve); - pve = NULL; - } - } else if (m && - (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || - !TAILQ_EMPTY(&m->md.pv_list))) - pve = pmap_get_pv_entry(); - } else if (m && - (!(m->oflags & VPO_UNMANAGED) || m->md.pv_kva || - !TAILQ_EMPTY(&m->md.pv_list))) - pve = pmap_get_pv_entry(); - - if (m) { - if ((m->oflags & VPO_UNMANAGED)) { - if (!TAILQ_EMPTY(&m->md.pv_list) || - m->md.pv_kva) { - KASSERT(pve != NULL, ("No pv")); - nflags |= PVF_UNMAN; - pmap_enter_pv(m, pve, pmap, va, nflags); - } else - m->md.pv_kva = va; - } else { - KASSERT(va < kmi.clean_sva || - va >= kmi.clean_eva, - ("pmap_enter: managed mapping within the clean submap")); - KASSERT(pve != NULL, ("No pv")); - pmap_enter_pv(m, pve, pmap, va, nflags); - } - } - } - /* - * Make sure userland mappings get the right permissions - */ - if (pmap != kernel_pmap && va != vector_page) { - npte |= L2_S_PROT_U; - } - - /* - * Keep the stats up to date - */ - if (opte == 0) { - l2b->l2b_occupancy++; - pmap->pm_stats.resident_count++; - } - - /* - * 
If this is just a wiring change, the two PTEs will be - * identical, so there's no need to update the page table. - */ - if (npte != opte) { - boolean_t is_cached = pmap_is_current(pmap); - - *ptep = npte; - if (is_cached) { - /* - * We only need to frob the cache/tlb if this pmap - * is current - */ - PTE_SYNC(ptep); - if (L1_IDX(va) != L1_IDX(vector_page) && - l2pte_valid(npte)) { - /* - * This mapping is likely to be accessed as - * soon as we return to userland. Fix up the - * L1 entry to avoid taking another - * page/domain fault. - */ - pd_entry_t *pl1pd, l1pd; - - pl1pd = &pmap->pm_l1->l1_kva[L1_IDX(va)]; - l1pd = l2b->l2b_phys | L1_C_DOM(pmap->pm_domain) | - L1_C_PROTO; - if (*pl1pd != l1pd) { - *pl1pd = l1pd; - PTE_SYNC(pl1pd); - } - } - } - - if (PV_BEEN_EXECD(oflags)) - pmap_tlb_flushID_SE(pmap, va); - else if (PV_BEEN_REFD(oflags)) - pmap_tlb_flushD_SE(pmap, va); - - - if (m) - pmap_fix_cache(m, pmap, va); - } - return (KERN_SUCCESS); -} - -/* - * Maps a sequence of resident pages belonging to the same object. - * The sequence begins with the given page m_start. This page is - * mapped at the given virtual address start. Each subsequent page is - * mapped at a virtual address that is offset from start by the same - * amount as the page is offset from m_start within the object. The - * last page in the sequence is the page with the largest offset from - * m_start that can be mapped at a virtual address less than the given - * virtual address end. Not every virtual page between start and end - * is mapped; only those for which a resident page exists with the - * corresponding offset from m_start are mapped. 
- */ -void -pmap_enter_object(pmap_t pmap, vm_offset_t start, vm_offset_t end, - vm_page_t m_start, vm_prot_t prot) -{ - vm_page_t m; - vm_pindex_t diff, psize; - - VM_OBJECT_ASSERT_LOCKED(m_start->object); - - psize = atop(end - start); - m = m_start; - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - while (m != NULL && (diff = m->pindex - m_start->pindex) < psize) { - pmap_enter_locked(pmap, start + ptoa(diff), m, prot & - (VM_PROT_READ | VM_PROT_EXECUTE), PMAP_ENTER_NOSLEEP); - m = TAILQ_NEXT(m, listq); - } - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - -/* - * this code makes some *MAJOR* assumptions: - * 1. Current pmap & pmap exists. - * 2. Not wired. - * 3. Read access. - * 4. No page table pages. - * but is *MUCH* faster than pmap_enter... - */ - -void -pmap_enter_quick(pmap_t pmap, vm_offset_t va, vm_page_t m, vm_prot_t prot) -{ - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - pmap_enter_locked(pmap, va, m, prot & (VM_PROT_READ | VM_PROT_EXECUTE), - PMAP_ENTER_NOSLEEP); - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - -/* - * Clear the wired attribute from the mappings for the specified range of - * addresses in the given pmap. Every valid mapping within that range - * must have the wired attribute set. In contrast, invalid mappings - * cannot have the wired attribute set, so they are ignored. - * - * XXX Wired mappings of unmanaged pages cannot be counted by this pmap - * implementation. 
- */ -void -pmap_unwire(pmap_t pmap, vm_offset_t sva, vm_offset_t eva) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - pv_entry_t pv; - vm_offset_t next_bucket; - vm_page_t m; - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pmap); - while (sva < eva) { - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - l2b = pmap_get_l2_bucket(pmap, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - for (ptep = &l2b->l2b_kva[l2pte_index(sva)]; sva < next_bucket; - sva += PAGE_SIZE, ptep++) { - if ((pte = *ptep) == 0 || - (m = PHYS_TO_VM_PAGE(l2pte_pa(pte))) == NULL || - (m->oflags & VPO_UNMANAGED) != 0) - continue; - pv = pmap_find_pv(m, pmap, sva); - if ((pv->pv_flags & PVF_WIRED) == 0) - panic("pmap_unwire: pv %p isn't wired", pv); - pv->pv_flags &= ~PVF_WIRED; - pmap->pm_stats.wired_count--; - } - } - rw_wunlock(&pvh_global_lock); - PMAP_UNLOCK(pmap); -} - - -/* - * Copy the range specified by src_addr/len - * from the source map to the range dst_addr/len - * in the destination map. - * - * This routine is only advisory and need not do anything. - */ -void -pmap_copy(pmap_t dst_pmap, pmap_t src_pmap, vm_offset_t dst_addr, - vm_size_t len, vm_offset_t src_addr) -{ -} - - -/* - * Routine: pmap_extract - * Function: - * Extract the physical page address associated - * with the given map/virtual_address pair. - */ -vm_paddr_t -pmap_extract(pmap_t pmap, vm_offset_t va) -{ - vm_paddr_t pa; - - PMAP_LOCK(pmap); - pa = pmap_extract_locked(pmap, va); - PMAP_UNLOCK(pmap); - return (pa); -} - -static vm_paddr_t -pmap_extract_locked(pmap_t pmap, vm_offset_t va) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - - if (pmap != kernel_pmap) - PMAP_ASSERT_LOCKED(pmap); - l1idx = L1_IDX(va); - l1pd = pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - /* - * These should only happen for the kernel pmap. 
- */ - KASSERT(pmap == kernel_pmap, ("unexpected section")); - /* XXX: what to do about the bits > 32 ? */ - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - } else { - /* - * Note that we can't rely on the validity of the L1 - * descriptor as an indication that a mapping exists. - * We have to look it up in the L2 dtable. - */ - l2 = pmap->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) - return (0); - pte = ptep[l2pte_index(va)]; - if (pte == 0) - return (0); - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - } - return (pa); -} - -/* - * Atomically extract and hold the physical page with the given - * pmap and virtual address pair if that mapping permits the given - * protection. - * - */ -vm_page_t -pmap_extract_and_hold(pmap_t pmap, vm_offset_t va, vm_prot_t prot) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa, paddr; - vm_page_t m = NULL; - u_int l1idx; - l1idx = L1_IDX(va); - paddr = 0; - - PMAP_LOCK(pmap); -retry: - l1pd = pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - /* - * These should only happen for kernel_pmap - */ - KASSERT(pmap == kernel_pmap, ("huh")); - /* XXX: what to do about the bits > 32 ? */ - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) - goto retry; - if (l1pd & L1_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { - m = PHYS_TO_VM_PAGE(pa); - vm_page_hold(m); - } - - } else { - /* - * Note that we can't rely on the validity of the L1 - * descriptor as an indication that a mapping exists. - * We have to look it up in the L2 dtable. 
- */ - l2 = pmap->pm_l2[L2_IDX(l1idx)]; - - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - PMAP_UNLOCK(pmap); - return (NULL); - } - - ptep = &ptep[l2pte_index(va)]; - pte = *ptep; - - if (pte == 0) { - PMAP_UNLOCK(pmap); - return (NULL); - } - if (pte & L2_S_PROT_W || (prot & VM_PROT_WRITE) == 0) { - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - if (vm_page_pa_tryrelock(pmap, pa & PG_FRAME, &paddr)) - goto retry; - m = PHYS_TO_VM_PAGE(pa); - vm_page_hold(m); - } - } - - PMAP_UNLOCK(pmap); - PA_UNLOCK_COND(paddr); - return (m); -} - -vm_paddr_t -pmap_dump_kextract(vm_offset_t va, pt2_entry_t *pte2p) -{ - struct l2_dtable *l2; - pd_entry_t l1pd; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - u_int l1idx; - - l1idx = L1_IDX(va); - l1pd = kernel_pmap->pm_l1->l1_kva[l1idx]; - if (l1pte_section_p(l1pd)) { - if (l1pd & L1_S_SUPERSEC) - pa = (l1pd & L1_SUP_FRAME) | (va & L1_SUP_OFFSET); - else - pa = (l1pd & L1_S_FRAME) | (va & L1_S_OFFSET); - pte = L2_S_PROTO | pa | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ | VM_PROT_WRITE); - } else { - l2 = kernel_pmap->pm_l2[L2_IDX(l1idx)]; - if (l2 == NULL || - (ptep = l2->l2_bucket[L2_BUCKET(l1idx)].l2b_kva) == NULL) { - pte = 0; - pa = 0; - goto out; - } - pte = ptep[l2pte_index(va)]; - if (pte == 0) { - pa = 0; - goto out; - } - if ((pte & L2_TYPE_MASK) == L2_TYPE_L) - pa = (pte & L2_L_FRAME) | (va & L2_L_OFFSET); - else - pa = (pte & L2_S_FRAME) | (va & L2_S_OFFSET); - } -out: - if (pte2p != NULL) - *pte2p = pte; - return (pa); -} - -/* - * Initialize a preallocated and zeroed pmap structure, - * such as one in a vmspace structure. 
- */ - -int -pmap_pinit(pmap_t pmap) -{ - PDEBUG(1, printf("pmap_pinit: pmap = %08x\n", (uint32_t) pmap)); - - pmap_alloc_l1(pmap); - bzero(pmap->pm_l2, sizeof(pmap->pm_l2)); - - CPU_ZERO(&pmap->pm_active); - - TAILQ_INIT(&pmap->pm_pvlist); - bzero(&pmap->pm_stats, sizeof pmap->pm_stats); - pmap->pm_stats.resident_count = 1; - if (vector_page < KERNBASE) { - pmap_enter(pmap, vector_page, PHYS_TO_VM_PAGE(systempage.pv_pa), - VM_PROT_READ, PMAP_ENTER_WIRED | VM_PROT_READ, 0); - } - return (1); -} - - -/*************************************************** - * page management routines. - ***************************************************/ - - -static void -pmap_free_pv_entry(pv_entry_t pv) -{ - pv_entry_count--; - uma_zfree(pvzone, pv); -} - - -/* - * get a new pv_entry, allocating a block from the system - * when needed. - * the memory allocation is performed bypassing the malloc code - * because of the possibility of allocations at interrupt time. - */ -static pv_entry_t -pmap_get_pv_entry(void) -{ - pv_entry_t ret_value; - - pv_entry_count++; - if (pv_entry_count > pv_entry_high_water) - pagedaemon_wakeup(); - ret_value = uma_zalloc(pvzone, M_NOWAIT); - return ret_value; -} - -/* - * Remove the given range of addresses from the specified map. - * - * It is assumed that the start and end are properly - * rounded to the page size. - */ -#define PMAP_REMOVE_CLEAN_LIST_SIZE 3 -void -pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t eva) -{ - struct l2_bucket *l2b; - vm_offset_t next_bucket; - pt_entry_t *ptep; - u_int total; - u_int mappings, is_exec, is_refd; - int flushall = 0; - - - /* - * we lock in the pmap => pv_head direction - */ - - rw_wlock(&pvh_global_lock); - PMAP_LOCK(pm); - total = 0; - while (sva < eva) { - /* - * Do one L2 bucket's worth at a time. 
- */ - next_bucket = L2_NEXT_BUCKET(sva); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(pm, sva); - if (l2b == NULL) { - sva = next_bucket; - continue; - } - - ptep = &l2b->l2b_kva[l2pte_index(sva)]; - mappings = 0; - - while (sva < next_bucket) { - struct vm_page *pg; - pt_entry_t pte; - vm_paddr_t pa; - - pte = *ptep; - - if (pte == 0) { - /* - * Nothing here, move along - */ - sva += PAGE_SIZE; - ptep++; - continue; - } - - pm->pm_stats.resident_count--; - pa = l2pte_pa(pte); - is_exec = 0; - is_refd = 1; - - /* - * Update flags. In a number of circumstances, - * we could cluster a lot of these and do a - * number of sequential pages in one go. - */ - if ((pg = PHYS_TO_VM_PAGE(pa)) != NULL) { - struct pv_entry *pve; - - pve = pmap_remove_pv(pg, pm, sva); - if (pve) { - is_exec = PV_BEEN_EXECD(pve->pv_flags); - is_refd = PV_BEEN_REFD(pve->pv_flags); - pmap_free_pv_entry(pve); - } - } - - if (l2pte_valid(pte) && pmap_is_current(pm)) { - if (total < PMAP_REMOVE_CLEAN_LIST_SIZE) { - total++; - if (is_exec) { - cpu_idcache_wbinv_range(sva, - PAGE_SIZE); - cpu_l2cache_wbinv_range(sva, - PAGE_SIZE); - cpu_tlb_flushID_SE(sva); - } else if (is_refd) { - cpu_dcache_wbinv_range(sva, - PAGE_SIZE); - cpu_l2cache_wbinv_range(sva, - PAGE_SIZE); - cpu_tlb_flushD_SE(sva); - } - } else if (total == PMAP_REMOVE_CLEAN_LIST_SIZE) { - /* flushall will also only get set for - * for a current pmap - */ - cpu_idcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - flushall = 1; - total++; - } - } - *ptep = 0; - PTE_SYNC(ptep); - - sva += PAGE_SIZE; - ptep++; - mappings++; - } - - pmap_free_l2_bucket(pm, l2b, mappings); - } - - rw_wunlock(&pvh_global_lock); - if (flushall) - cpu_tlb_flushID(); - PMAP_UNLOCK(pm); -} - -/* - * pmap_zero_page() - * - * Zero a given physical page by mapping it at a page hook point. 
- * In doing the zero page op, the page we zero is mapped cachable, as with - * StrongARM accesses to non-cached pages are non-burst making writing - * _any_ bulk data very slow. - */ -#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) -void -pmap_zero_page_generic(vm_paddr_t phys, int off, int size) -{ - - if (_arm_bzero && size >= _min_bzero_size && - _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) - return; - - mtx_lock(&cmtx); - /* - * Hook in the page, zero it, invalidate the TLB as needed. - * - * Note the temporary zero-page mapping must be a non-cached page in - * order to work without corruption when write-allocate is enabled. - */ - *cdst_pte = L2_S_PROTO | phys | L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE); - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - if (off || size != PAGE_SIZE) - bzero((void *)(cdstp + off), size); - else - bzero_page(cdstp); - - mtx_unlock(&cmtx); -} -#endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -void -pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) -{ - - if (_arm_bzero && size >= _min_bzero_size && - _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) - return; - - mtx_lock(&cmtx); - /* - * Hook in the page, zero it, and purge the cache for that - * zeroed page. Invalidate the TLB as needed. - */ - *cdst_pte = L2_S_PROTO | phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - if (off || size != PAGE_SIZE) - bzero((void *)(cdstp + off), size); - else - bzero_page(cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - -/* - * Change the PTEs for the specified kernel mappings such that they - * will use the mini data cache instead of the main data cache. 
- */ -void -pmap_use_minicache(vm_offset_t va, vm_size_t size) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, *sptep, pte; - vm_offset_t next_bucket, eva; - -#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3) - if (xscale_use_minidata == 0) - return; -#endif - - eva = va + size; - - while (va < eva) { - next_bucket = L2_NEXT_BUCKET(va); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - - sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; - - while (va < next_bucket) { - pte = *ptep; - if (!l2pte_minidata(pte)) { - cpu_dcache_wbinv_range(va, PAGE_SIZE); - cpu_tlb_flushD_SE(va); - *ptep = pte & ~L2_B; - } - ptep++; - va += PAGE_SIZE; - } - PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); - } - cpu_cpwait(); -} -#endif /* ARM_MMU_XSCALE == 1 */ - -/* - * pmap_zero_page zeros the specified hardware page by mapping - * the page into KVM and using bzero to clear its contents. - */ -void -pmap_zero_page(vm_page_t m) -{ - pmap_zero_page_func(VM_PAGE_TO_PHYS(m), 0, PAGE_SIZE); -} - - -/* - * pmap_zero_page_area zeros the specified hardware page by mapping - * the page into KVM and using bzero to clear its contents. - * - * off and size may not cover an area beyond a single hardware page. - */ -void -pmap_zero_page_area(vm_page_t m, int off, int size) -{ - - pmap_zero_page_func(VM_PAGE_TO_PHYS(m), off, size); -} - - -/* - * pmap_zero_page_idle zeros the specified hardware page by mapping - * the page into KVM and using bzero to clear its contents. This - * is intended to be called from the vm_pagezero process only and - * outside of Giant. - */ -void -pmap_zero_page_idle(vm_page_t m) -{ - - pmap_zero_page(m); -} - -#if 0 -/* - * pmap_clean_page() - * - * This is a local function used to work out the best strategy to clean - * a single page referenced by its entry in the PV table. It should be used by - * pmap_copy_page, pmap_zero page and maybe some others later on. 
- * - * Its policy is effectively: - * o If there are no mappings, we don't bother doing anything with the cache. - * o If there is one mapping, we clean just that page. - * o If there are multiple mappings, we clean the entire cache. - * - * So that some functions can be further optimised, it returns 0 if it didn't - * clean the entire cache, or 1 if it did. - * - * XXX One bug in this routine is that if the pv_entry has a single page - * mapped at 0x00000000 a whole cache clean will be performed rather than - * just the 1 page. Since this should not occur in everyday use and if it does - * it will just result in not the most efficient clean for the page. - * - * We don't yet use this function but may want to. - */ -static int -pmap_clean_page(struct pv_entry *pv, boolean_t is_src) -{ - pmap_t pm, pm_to_clean = NULL; - struct pv_entry *npv; - u_int cache_needs_cleaning = 0; - u_int flags = 0; - vm_offset_t page_to_clean = 0; - - if (pv == NULL) { - /* nothing mapped in so nothing to flush */ - return (0); - } - - /* - * Since we flush the cache each time we change to a different - * user vmspace, we only need to flush the page if it is in the - * current pmap. - */ - if (curthread) - pm = vmspace_pmap(curproc->p_vmspace); - else - pm = kernel_pmap; - - for (npv = pv; npv; npv = TAILQ_NEXT(npv, pv_list)) { - if (npv->pv_pmap == kernel_pmap || npv->pv_pmap == pm) { - flags |= npv->pv_flags; - /* - * The page is mapped non-cacheable in - * this map. No need to flush the cache. 
- */ - if (npv->pv_flags & PVF_NC) { -#ifdef DIAGNOSTIC - if (cache_needs_cleaning) - panic("pmap_clean_page: " - "cache inconsistency"); -#endif - break; - } else if (is_src && (npv->pv_flags & PVF_WRITE) == 0) - continue; - if (cache_needs_cleaning) { - page_to_clean = 0; - break; - } else { - page_to_clean = npv->pv_va; - pm_to_clean = npv->pv_pmap; - } - cache_needs_cleaning = 1; - } - } - if (page_to_clean) { - if (PV_BEEN_EXECD(flags)) - pmap_idcache_wbinv_range(pm_to_clean, page_to_clean, - PAGE_SIZE); - else - pmap_dcache_wb_range(pm_to_clean, page_to_clean, - PAGE_SIZE, !is_src, (flags & PVF_WRITE) == 0); - } else if (cache_needs_cleaning) { - if (PV_BEEN_EXECD(flags)) - pmap_idcache_wbinv_all(pm); - else - pmap_dcache_wbinv_all(pm); - return (1); - } - return (0); -} -#endif - -/* - * pmap_copy_page copies the specified (machine independent) - * page by mapping the page into virtual memory and using - * bcopy to copy the page, one machine dependent page at a - * time. - */ - -/* - * pmap_copy_page() - * - * Copy one physical page into another, by mapping the pages into - * hook points. The same comment regarding cachability as in - * pmap_zero_page also applies here. - */ -#if ARM_MMU_GENERIC != 0 || defined (CPU_XSCALE_CORE3) -void -pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) -{ -#if 0 - struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); -#endif - - /* - * Clean the source page. Hold the source page's lock for - * the duration of the copy so that no other mappings can - * be created while we have a potentially aliased mapping. - */ -#if 0 - /* - * XXX: Not needed while we call cpu_dcache_wbinv_all() in - * pmap_copy_page(). - */ - (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); -#endif - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. 
- */ - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | src | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | dst | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy_page(csrcp, cdstp); - mtx_unlock(&cmtx); - cpu_dcache_inv_range(csrcp, PAGE_SIZE); - cpu_dcache_wbinv_range(cdstp, PAGE_SIZE); - cpu_l2cache_inv_range(csrcp, PAGE_SIZE); - cpu_l2cache_wbinv_range(cdstp, PAGE_SIZE); -} - -void -pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) -{ - - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | a_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | pte_l2_s_cache_mode; - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | b_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | pte_l2_s_cache_mode; - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); - mtx_unlock(&cmtx); - cpu_dcache_inv_range(csrcp + a_offs, cnt); - cpu_dcache_wbinv_range(cdstp + b_offs, cnt); - cpu_l2cache_inv_range(csrcp + a_offs, cnt); - cpu_l2cache_wbinv_range(cdstp + b_offs, cnt); -} -#endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -void -pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) -{ -#if 0 - /* XXX: Only needed for pmap_clean_page(), which is commented out. */ - struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); -#endif - - /* - * Clean the source page. Hold the source page's lock for - * the duration of the copy so that no other mappings can - * be created while we have a potentially aliased mapping. - */ -#if 0 - /* - * XXX: Not needed while we call cpu_dcache_wbinv_all() in - * pmap_copy_page(). 
- */ - (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); -#endif - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. - */ - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | src | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | dst | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy_page(csrcp, cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - -void -pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) -{ - - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | a_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | b_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} -#endif /* ARM_MMU_XSCALE == 1 */ - -void -pmap_copy_page(vm_page_t src, vm_page_t dst) -{ - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - if (_arm_memcpy && PAGE_SIZE >= _min_memcpy_size && - _arm_memcpy((void *)VM_PAGE_TO_PHYS(dst), - (void *)VM_PAGE_TO_PHYS(src), PAGE_SIZE, IS_PHYSICAL) == 0) - return; - pmap_copy_page_func(VM_PAGE_TO_PHYS(src), VM_PAGE_TO_PHYS(dst)); -} - -/* - * We have code to do unmapped I/O. However, it isn't quite right and - * causes un-page-aligned I/O to devices to fail (most notably newfs - * or fsck). We give up a little performance to not allow unmapped I/O - * to gain stability. 
- */ -int unmapped_buf_allowed = 0; - -void -pmap_copy_pages(vm_page_t ma[], vm_offset_t a_offset, vm_page_t mb[], - vm_offset_t b_offset, int xfersize) -{ - vm_page_t a_pg, b_pg; - vm_offset_t a_pg_offset, b_pg_offset; - int cnt; - - cpu_dcache_wbinv_all(); - cpu_l2cache_wbinv_all(); - while (xfersize > 0) { - a_pg = ma[a_offset >> PAGE_SHIFT]; - a_pg_offset = a_offset & PAGE_MASK; - cnt = min(xfersize, PAGE_SIZE - a_pg_offset); - b_pg = mb[b_offset >> PAGE_SHIFT]; - b_pg_offset = b_offset & PAGE_MASK; - cnt = min(cnt, PAGE_SIZE - b_pg_offset); - pmap_copy_page_offs_func(VM_PAGE_TO_PHYS(a_pg), a_pg_offset, - VM_PAGE_TO_PHYS(b_pg), b_pg_offset, cnt); - xfersize -= cnt; - a_offset += cnt; - b_offset += cnt; - } -} - -vm_offset_t -pmap_quick_enter_page(vm_page_t m) -{ - /* - * Don't bother with a PCPU pageframe, since we don't support - * SMP for anything pre-armv7. Use pmap_kenter() to ensure - * caching is handled correctly for multiple mappings of the - * same physical page. - */ - - mtx_assert(&qmap_mtx, MA_NOTOWNED); - mtx_lock(&qmap_mtx); - - pmap_kenter(qmap_addr, VM_PAGE_TO_PHYS(m)); - - return (qmap_addr); -} - -void -pmap_quick_remove_page(vm_offset_t addr) -{ - KASSERT(addr == qmap_addr, - ("pmap_quick_remove_page: invalid address")); - mtx_assert(&qmap_mtx, MA_OWNED); - pmap_kremove(addr); - mtx_unlock(&qmap_mtx); -} - -/* - * this routine returns true if a physical page resides - * in the given pmap. 
- */ -boolean_t -pmap_page_exists_quick(pmap_t pmap, vm_page_t m) -{ - pv_entry_t pv; - int loops = 0; - boolean_t rv; - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_page_exists_quick: page %p is not managed", m)); - rv = FALSE; - rw_wlock(&pvh_global_lock); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) { - if (pv->pv_pmap == pmap) { - rv = TRUE; - break; - } - loops++; - if (loops >= 16) - break; - } - rw_wunlock(&pvh_global_lock); - return (rv); -} - -/* - * pmap_page_wired_mappings: - * - * Return the number of managed mappings to the given physical page - * that are wired. - */ -int -pmap_page_wired_mappings(vm_page_t m) -{ - pv_entry_t pv; - int count; - - count = 0; - if ((m->oflags & VPO_UNMANAGED) != 0) - return (count); - rw_wlock(&pvh_global_lock); - TAILQ_FOREACH(pv, &m->md.pv_list, pv_list) - if ((pv->pv_flags & PVF_WIRED) != 0) - count++; - rw_wunlock(&pvh_global_lock); - return (count); -} - -/* - * This function is advisory. - */ -void -pmap_advise(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, int advice) -{ -} - -/* - * pmap_ts_referenced: - * - * Return the count of reference bits for a page, clearing all of them. - */ -int -pmap_ts_referenced(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_ts_referenced: page %p is not managed", m)); - return (pmap_clearbit(m, PVF_REF)); -} - - -boolean_t -pmap_is_modified(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_is_modified: page %p is not managed", m)); - if (m->md.pvh_attrs & PVF_MOD) - return (TRUE); - - return(FALSE); -} - - -/* - * Clear the modify bits on the specified physical page. - */ -void -pmap_clear_modify(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_clear_modify: page %p is not managed", m)); - VM_OBJECT_ASSERT_WLOCKED(m->object); - KASSERT(!vm_page_xbusied(m), - ("pmap_clear_modify: page %p is exclusive busied", m)); - - /* - * If the page is not PGA_WRITEABLE, then no mappings can be modified. 
- * If the object containing the page is locked and the page is not - * exclusive busied, then PGA_WRITEABLE cannot be concurrently set. - */ - if ((m->aflags & PGA_WRITEABLE) == 0) - return; - if (m->md.pvh_attrs & PVF_MOD) - pmap_clearbit(m, PVF_MOD); -} - - -/* - * pmap_is_referenced: - * - * Return whether or not the specified physical page was referenced - * in any physical maps. - */ -boolean_t -pmap_is_referenced(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_is_referenced: page %p is not managed", m)); - return ((m->md.pvh_attrs & PVF_REF) != 0); -} - - -/* - * Clear the write and modified bits in each of the given page's mappings. - */ -void -pmap_remove_write(vm_page_t m) -{ - - KASSERT((m->oflags & VPO_UNMANAGED) == 0, - ("pmap_remove_write: page %p is not managed", m)); - - /* - * If the page is not exclusive busied, then PGA_WRITEABLE cannot be - * set by another thread while the object is locked. Thus, - * if PGA_WRITEABLE is clear, no page table entries need updating. - */ - VM_OBJECT_ASSERT_WLOCKED(m->object); - if (vm_page_xbusied(m) || (m->aflags & PGA_WRITEABLE) != 0) - pmap_clearbit(m, PVF_WRITE); -} - - -/* - * perform the pmap work for mincore - */ -int -pmap_mincore(pmap_t pmap, vm_offset_t addr, vm_paddr_t *locked_pa) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, pte; - vm_paddr_t pa; - vm_page_t m; - int val; - boolean_t managed; - - PMAP_LOCK(pmap); -retry: - l2b = pmap_get_l2_bucket(pmap, addr); - if (l2b == NULL) { - val = 0; - goto out; - } - ptep = &l2b->l2b_kva[l2pte_index(addr)]; - pte = *ptep; - if (!l2pte_valid(pte)) { - val = 0; - goto out; - } - val = MINCORE_INCORE; - if (pte & L2_S_PROT_W) - val |= MINCORE_MODIFIED | MINCORE_MODIFIED_OTHER; - managed = false; - pa = l2pte_pa(pte); - m = PHYS_TO_VM_PAGE(pa); - if (m != NULL && !(m->oflags & VPO_UNMANAGED)) - managed = true; - if (managed) { - /* - * The ARM pmap tries to maintain a per-mapping - * reference bit. 
The trouble is that it's kept in - * the PV entry, not the PTE, so it's costly to access - * here. You would need to acquire the pvh global - * lock, call pmap_find_pv(), and introduce a custom - * version of vm_page_pa_tryrelock() that releases and - * reacquires the pvh global lock. In the end, I - * doubt it's worthwhile. This may falsely report - * the given address as referenced. - */ - if ((m->md.pvh_attrs & PVF_REF) != 0) - val |= MINCORE_REFERENCED | MINCORE_REFERENCED_OTHER; - } - if ((val & (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER)) != - (MINCORE_MODIFIED_OTHER | MINCORE_REFERENCED_OTHER) && managed) { - /* Ensure that "PHYS_TO_VM_PAGE(pa)->object" doesn't change. */ - if (vm_page_pa_tryrelock(pmap, pa, locked_pa)) - goto retry; - } else -out: - PA_UNLOCK_COND(*locked_pa); - PMAP_UNLOCK(pmap); - return (val); -} - - -void -pmap_sync_icache(pmap_t pm, vm_offset_t va, vm_size_t sz) -{ -} - - -/* - * Increase the starting virtual address of the given mapping if a - * different alignment might result in more superpage mappings. - */ -void -pmap_align_superpage(vm_object_t object, vm_ooffset_t offset, - vm_offset_t *addr, vm_size_t size) -{ -} - -#define BOOTSTRAP_DEBUG - -/* - * pmap_map_section: - * - * Create a single section mapping. - */ -void -pmap_map_section(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, - int prot, int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pd_entry_t fl; - - KASSERT(((va | pa) & L1_S_OFFSET) == 0, ("ouin2")); - - switch (cache) { - case PTE_NOCACHE: - default: - fl = 0; - break; - - case PTE_CACHE: - fl = pte_l1_s_cache_mode; - break; - - case PTE_PAGETABLE: - fl = pte_l1_s_cache_mode_pt; - break; - } - - pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | - L1_S_PROT(PTE_KERNEL, prot) | fl | L1_S_DOM(PMAP_DOMAIN_KERNEL); - PTE_SYNC(&pde[va >> L1_S_SHIFT]); - -} - -/* - * pmap_link_l2pt: - * - * Link the L2 page table specified by l2pv.pv_pa into the L1 - * page table at the slot for "va". 
- */ -void -pmap_link_l2pt(vm_offset_t l1pt, vm_offset_t va, struct pv_addr *l2pv) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt, proto; - u_int slot = va >> L1_S_SHIFT; - - proto = L1_S_DOM(PMAP_DOMAIN_KERNEL) | L1_C_PROTO; - -#ifdef VERBOSE_INIT_ARM - printf("pmap_link_l2pt: pa=0x%x va=0x%x\n", l2pv->pv_pa, l2pv->pv_va); -#endif - - pde[slot + 0] = proto | (l2pv->pv_pa + 0x000); - - PTE_SYNC(&pde[slot]); - - SLIST_INSERT_HEAD(&kernel_pt_list, l2pv, pv_list); - - -} - -/* - * pmap_map_entry - * - * Create a single page mapping. - */ -void -pmap_map_entry(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, int prot, - int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t fl; - pt_entry_t *pte; - - KASSERT(((va | pa) & PAGE_MASK) == 0, ("ouin")); - - switch (cache) { - case PTE_NOCACHE: - default: - fl = 0; - break; - - case PTE_CACHE: - fl = pte_l2_s_cache_mode; - break; - - case PTE_PAGETABLE: - fl = pte_l2_s_cache_mode_pt; - break; - } - - if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) - panic("pmap_map_entry: no L2 table for VA 0x%08x", va); - - pte = (pt_entry_t *) kernel_pt_lookup(pde[L1_IDX(va)] & L1_C_ADDR_MASK); - - if (pte == NULL) - panic("pmap_map_entry: can't find L2 table for VA 0x%08x", va); - - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | fl; - PTE_SYNC(&pte[l2pte_index(va)]); -} - -/* - * pmap_map_chunk: - * - * Map a chunk of memory using the most efficient mappings - * possible (section. large page, small page) into the - * provided L1 and L2 tables at the specified virtual address. 
- */ -vm_size_t -pmap_map_chunk(vm_offset_t l1pt, vm_offset_t va, vm_offset_t pa, - vm_size_t size, int prot, int cache) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t *pte, f1, f2s, f2l; - vm_size_t resid; - int i; - - resid = (size + (PAGE_SIZE - 1)) & ~(PAGE_SIZE - 1); - - if (l1pt == 0) - panic("pmap_map_chunk: no L1 table provided"); - -#ifdef VERBOSE_INIT_ARM - printf("pmap_map_chunk: pa=0x%x va=0x%x size=0x%x resid=0x%x " - "prot=0x%x cache=%d\n", pa, va, size, resid, prot, cache); -#endif - - switch (cache) { - case PTE_NOCACHE: - default: - f1 = 0; - f2l = 0; - f2s = 0; - break; - - case PTE_CACHE: - f1 = pte_l1_s_cache_mode; - f2l = pte_l2_l_cache_mode; - f2s = pte_l2_s_cache_mode; - break; - - case PTE_PAGETABLE: - f1 = pte_l1_s_cache_mode_pt; - f2l = pte_l2_l_cache_mode_pt; - f2s = pte_l2_s_cache_mode_pt; - break; - } - - size = resid; - - while (resid > 0) { - /* See if we can use a section mapping. */ - if (L1_S_MAPPABLE_P(va, pa, resid)) { -#ifdef VERBOSE_INIT_ARM - printf("S"); -#endif - pde[va >> L1_S_SHIFT] = L1_S_PROTO | pa | - L1_S_PROT(PTE_KERNEL, prot) | f1 | - L1_S_DOM(PMAP_DOMAIN_KERNEL); - PTE_SYNC(&pde[va >> L1_S_SHIFT]); - va += L1_S_SIZE; - pa += L1_S_SIZE; - resid -= L1_S_SIZE; - continue; - } - - /* - * Ok, we're going to use an L2 table. Make sure - * one is actually in the corresponding L1 slot - * for the current VA. - */ - if ((pde[va >> L1_S_SHIFT] & L1_TYPE_MASK) != L1_TYPE_C) - panic("pmap_map_chunk: no L2 table for VA 0x%08x", va); - - pte = (pt_entry_t *) kernel_pt_lookup( - pde[L1_IDX(va)] & L1_C_ADDR_MASK); - if (pte == NULL) - panic("pmap_map_chunk: can't find L2 table for VA" - "0x%08x", va); - /* See if we can use a L2 large page mapping. 
*/ - if (L2_L_MAPPABLE_P(va, pa, resid)) { -#ifdef VERBOSE_INIT_ARM - printf("L"); -#endif - for (i = 0; i < 16; i++) { - pte[l2pte_index(va) + i] = - L2_L_PROTO | pa | - L2_L_PROT(PTE_KERNEL, prot) | f2l; - PTE_SYNC(&pte[l2pte_index(va) + i]); - } - va += L2_L_SIZE; - pa += L2_L_SIZE; - resid -= L2_L_SIZE; - continue; - } - - /* Use a small page mapping. */ -#ifdef VERBOSE_INIT_ARM - printf("P"); -#endif - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, prot) | f2s; - PTE_SYNC(&pte[l2pte_index(va)]); - va += PAGE_SIZE; - pa += PAGE_SIZE; - resid -= PAGE_SIZE; - } -#ifdef VERBOSE_INIT_ARM - printf("\n"); -#endif - return (size); - -} - -void -pmap_page_set_memattr(vm_page_t m, vm_memattr_t ma) -{ - /* - * Remember the memattr in a field that gets used to set the appropriate - * bits in the PTEs as mappings are established. - */ - m->md.pv_memattr = ma; - - /* - * It appears that this function can only be called before any mappings - * for the page are established on ARM. If this ever changes, this code - * will need to walk the pv_list and make each of the existing mappings - * uncacheable, being careful to sync caches and PTEs (and maybe - * invalidate TLB?) for any current mapping it modifies. - */ - if (m->md.pv_kva != 0 || TAILQ_FIRST(&m->md.pv_list) != NULL) - panic("Can't change memattr on page with existing mappings"); -} - - diff --git a/sys/arm/arm/trap-v4.c b/sys/arm/arm/trap-v4.c new file mode 100644 index 000000000000..eb4d68304b33 --- /dev/null +++ b/sys/arm/arm/trap-v4.c @@ -0,0 +1,737 @@ +/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ + +/*- + * Copyright 2004 Olivier Houchard + * Copyright 2003 Wasabi Systems, Inc. + * All rights reserved. + * + * Written by Steve C. Woodford for Wasabi Systems, Inc. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed for the NetBSD Project by + * Wasabi Systems, Inc. + * 4. The name of Wasabi Systems, Inc. may not be used to endorse + * or promote products derived from this software without specific prior + * written permission. + * + * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND + * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED + * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN + * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) + * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE + * POSSIBILITY OF SUCH DAMAGE. + */ +/*- + * Copyright (c) 1994-1997 Mark Brinicombe. + * Copyright (c) 1994 Brini. + * All rights reserved. + * + * This code is derived from software written for Brini by Mark Brinicombe + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. All advertising materials mentioning features or use of this software + * must display the following acknowledgement: + * This product includes software developed by Brini. + * 4. The name of the company nor the name of the author may be used to + * endorse or promote products derived from this software without specific + * prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT + * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY + * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF + * SUCH DAMAGE. 
+ * + * RiscBSD kernel project + * + * fault.c + * + * Fault handlers + * + * Created : 28/11/94 + */ + +#include +__FBSDID("$FreeBSD$"); + +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include + +#ifdef KDB +#include +#endif + +#ifdef KDTRACE_HOOKS +#include +#endif + +#define ReadWord(a) (*((volatile unsigned int *)(a))) + +#ifdef DEBUG +int last_fault_code; /* For the benefit of pmap_fault_fixup() */ +#endif + +struct ksig { + int signb; + u_long code; +}; +struct data_abort { + int (*func)(struct trapframe *, u_int, u_int, struct thread *, + struct ksig *); + const char *desc; +}; + +static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, + struct ksig *); +static int dab_align(struct trapframe *, u_int, u_int, struct thread *, + struct ksig *); +static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, + struct ksig *); +static void prefetch_abort_handler(struct trapframe *); + +static const struct data_abort data_aborts[] = { + {dab_fatal, "Vector Exception"}, + {dab_align, "Alignment Fault 1"}, + {dab_fatal, "Terminal Exception"}, + {dab_align, "Alignment Fault 3"}, + {dab_buserr, "External Linefetch Abort (S)"}, + {NULL, "Translation Fault (S)"}, +#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 + {NULL, "Translation Flag Fault"}, +#else + {dab_buserr, "External Linefetch Abort (P)"}, +#endif + {NULL, "Translation Fault (P)"}, + {dab_buserr, "External Non-Linefetch Abort (S)"}, + {NULL, "Domain Fault (S)"}, + {dab_buserr, "External Non-Linefetch Abort (P)"}, + {NULL, "Domain Fault (P)"}, + {dab_buserr, "External Translation Abort (L1)"}, + {NULL, "Permission Fault (S)"}, + {dab_buserr, "External Translation Abort (L2)"}, + {NULL, "Permission Fault (P)"} +}; + +/* Determine if a fault came from user mode */ +#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) + +/* Determine if 'x' is a permission fault 
*/ +#define IS_PERMISSION_FAULT(x) \ + (((1 << ((x) & FAULT_TYPE_MASK)) & \ + ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) + +static __inline void +call_trapsignal(struct thread *td, int sig, u_long code) +{ + ksiginfo_t ksi; + + ksiginfo_init_trap(&ksi); + ksi.ksi_signo = sig; + ksi.ksi_code = (int)code; + trapsignal(td, &ksi); +} + +void +abort_handler(struct trapframe *tf, int type) +{ + struct vm_map *map; + struct pcb *pcb; + struct thread *td; + u_int user, far, fsr; + vm_prot_t ftype; + void *onfault; + vm_offset_t va; + int error = 0; + struct ksig ksig; + struct proc *p; + + if (type == 1) + return (prefetch_abort_handler(tf)); + + /* Grab FAR/FSR before enabling interrupts */ + far = cpu_faultaddress(); + fsr = cpu_faultstatus(); +#if 0 + printf("data abort: fault address=%p (from pc=%p lr=%p)\n", + (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); +#endif + + /* Update vmmeter statistics */ +#if 0 + vmexp.traps++; +#endif + + td = curthread; + p = td->td_proc; + + PCPU_INC(cnt.v_trap); + /* Data abort came from user mode? 
*/ + user = TRAP_USERMODE(tf); + + if (user) { + td->td_pticks = 0; + td->td_frame = tf; + if (td->td_cowgen != td->td_proc->p_cowgen) + thread_cow_update(td); + + } + /* Grab the current pcb */ + pcb = td->td_pcb; + /* Re-enable interrupts if they were enabled previously */ + if (td->td_md.md_spinlock_count == 0) { + if (__predict_true(tf->tf_spsr & PSR_I) == 0) + enable_interrupts(PSR_I); + if (__predict_true(tf->tf_spsr & PSR_F) == 0) + enable_interrupts(PSR_F); + } + + + /* Invoke the appropriate handler, if necessary */ + if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { + if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, + td, &ksig)) { + goto do_trapsignal; + } + goto out; + } + + /* + * At this point, we're dealing with one of the following data aborts: + * + * FAULT_TRANS_S - Translation -- Section + * FAULT_TRANS_P - Translation -- Page + * FAULT_DOMAIN_S - Domain -- Section + * FAULT_DOMAIN_P - Domain -- Page + * FAULT_PERM_S - Permission -- Section + * FAULT_PERM_P - Permission -- Page + * + * These are the main virtual memory-related faults signalled by + * the MMU. + */ + + /* + * Make sure the Program Counter is sane. We could fall foul of + * someone executing Thumb code, in which case the PC might not + * be word-aligned. This would cause a kernel alignment fault + * further down if we have to decode the current instruction. + * XXX: It would be nice to be able to support Thumb at some point. + */ + if (__predict_false((tf->tf_pc & 3) != 0)) { + if (user) { + /* + * Give the user an illegal instruction signal. + */ + /* Deliver a SIGILL to the process */ + ksig.signb = SIGILL; + ksig.code = 0; + goto do_trapsignal; + } + + /* + * The kernel never executes Thumb code. + */ + printf("\ndata_abort_fault: Misaligned Kernel-mode " + "Program Counter\n"); + dab_fatal(tf, fsr, far, td, &ksig); + } + + va = trunc_page((vm_offset_t)far); + + /* + * It is only a kernel address space fault iff: + * 1. user == 0 and + * 2. 
pcb_onfault not set or + * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. + */ + if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || + (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && + __predict_true((pcb->pcb_onfault == NULL || + (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { + map = kernel_map; + + /* Was the fault due to the FPE/IPKDB ? */ + if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { + + /* + * Force exit via userret() + * This is necessary as the FPE is an extension to + * userland that actually runs in a priveledged mode + * but uses USR mode permissions for its accesses. + */ + user = 1; + ksig.signb = SIGSEGV; + ksig.code = 0; + goto do_trapsignal; + } + } else { + map = &td->td_proc->p_vmspace->vm_map; + } + + /* + * We need to know whether the page should be mapped as R or R/W. On + * armv6 and later the fault status register indicates whether the + * access was a read or write. Prior to armv6, we know that a + * permission fault can only be the result of a write to a read-only + * location, so we can deal with those quickly. Otherwise we need to + * disassemble the faulting instruction to determine if it was a write. + */ +#if __ARM_ARCH >= 6 + ftype = (fsr & FAULT_WNR) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ; +#else + if (IS_PERMISSION_FAULT(fsr)) + ftype = VM_PROT_WRITE; + else { + u_int insn = ReadWord(tf->tf_pc); + + if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ + ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ + ((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */ + ftype = VM_PROT_WRITE; + } else { + if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ + ftype = VM_PROT_READ | VM_PROT_WRITE; + else + ftype = VM_PROT_READ; + } + } +#endif + + /* + * See if the fault is as a result of ref/mod emulation, + * or domain mismatch. 
+ */ +#ifdef DEBUG + last_fault_code = fsr; +#endif + if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, + NULL, "Kernel page fault") != 0) + goto fatal_pagefault; + + if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, + user)) { + goto out; + } + + onfault = pcb->pcb_onfault; + pcb->pcb_onfault = NULL; + error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); + pcb->pcb_onfault = onfault; + if (__predict_true(error == 0)) + goto out; +fatal_pagefault: + if (user == 0) { + if (pcb->pcb_onfault) { + tf->tf_r0 = error; + tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; + return; + } + + printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, + error); + dab_fatal(tf, fsr, far, td, &ksig); + } + + + if (error == ENOMEM) { + printf("VM: pid %d (%s), uid %d killed: " + "out of swap\n", td->td_proc->p_pid, td->td_name, + (td->td_proc->p_ucred) ? + td->td_proc->p_ucred->cr_uid : -1); + ksig.signb = SIGKILL; + } else { + ksig.signb = SIGSEGV; + } + ksig.code = 0; +do_trapsignal: + call_trapsignal(td, ksig.signb, ksig.code); +out: + /* If returning to user mode, make sure to invoke userret() */ + if (user) + userret(td, tf); +} + +/* + * dab_fatal() handles the following data aborts: + * + * FAULT_WRTBUF_0 - Vector Exception + * FAULT_WRTBUF_1 - Terminal Exception + * + * We should never see these on a properly functioning system. + * + * This function is also called by the other handlers if they + * detect a fatal problem. + * + * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. + */ +static int +dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, + struct ksig *ksig) +{ + const char *mode; + +#ifdef KDTRACE_HOOKS + if (!TRAP_USERMODE(tf)) { + if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK)) + return (0); + } +#endif + + mode = TRAP_USERMODE(tf) ? 
"user" : "kernel"; + + disable_interrupts(PSR_I|PSR_F); + if (td != NULL) { + printf("Fatal %s mode data abort: '%s'\n", mode, + data_aborts[fsr & FAULT_TYPE_MASK].desc); + printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); + if ((fsr & FAULT_IMPRECISE) == 0) + printf("%08x, ", far); + else + printf("Invalid, "); + printf("spsr=%08x\n", tf->tf_spsr); + } else { + printf("Fatal %s mode prefetch abort at 0x%08x\n", + mode, tf->tf_pc); + printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); + } + + printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", + tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); + printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", + tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); + printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", + tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); + printf("r12=%08x, ", tf->tf_r12); + + if (TRAP_USERMODE(tf)) + printf("usp=%08x, ulr=%08x", + tf->tf_usr_sp, tf->tf_usr_lr); + else + printf("ssp=%08x, slr=%08x", + tf->tf_svc_sp, tf->tf_svc_lr); + printf(", pc =%08x\n\n", tf->tf_pc); + +#ifdef KDB + if (debugger_on_panic || kdb_active) + if (kdb_trap(fsr, 0, tf)) + return (0); +#endif + panic("Fatal abort"); + /*NOTREACHED*/ +} + +/* + * dab_align() handles the following data aborts: + * + * FAULT_ALIGN_0 - Alignment fault + * FAULT_ALIGN_1 - Alignment fault + * + * These faults are fatal if they happen in kernel mode. Otherwise, we + * deliver a bus error to the process. 
+ */ +static int +dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, + struct ksig *ksig) +{ + + /* Alignment faults are always fatal if they occur in kernel mode */ + if (!TRAP_USERMODE(tf)) { + if (!td || !td->td_pcb->pcb_onfault) + dab_fatal(tf, fsr, far, td, ksig); + tf->tf_r0 = EFAULT; + tf->tf_pc = (int)td->td_pcb->pcb_onfault; + return (0); + } + + /* pcb_onfault *must* be NULL at this point */ + + /* Deliver a bus error signal to the process */ + ksig->code = 0; + ksig->signb = SIGBUS; + td->td_frame = tf; + + return (1); +} + +/* + * dab_buserr() handles the following data aborts: + * + * FAULT_BUSERR_0 - External Abort on Linefetch -- Section + * FAULT_BUSERR_1 - External Abort on Linefetch -- Page + * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section + * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page + * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 + * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 + * + * If pcb_onfault is set, flag the fault and return to the handler. + * If the fault occurred in user mode, give the process a SIGBUS. + * + * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 + * can be flagged as imprecise in the FSR. This causes a real headache + * since some of the machine state is lost. In this case, tf->tf_pc + * may not actually point to the offending instruction. In fact, if + * we've taken a double abort fault, it generally points somewhere near + * the top of "data_abort_entry" in exception.S. + * + * In all other cases, these data aborts are considered fatal. + */ +static int +dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, + struct ksig *ksig) +{ + struct pcb *pcb = td->td_pcb; + +#ifdef __XSCALE__ + if ((fsr & FAULT_IMPRECISE) != 0 && + (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { + /* + * Oops, an imprecise, double abort fault. 
We've lost the + * r14_abt/spsr_abt values corresponding to the original + * abort, and the spsr saved in the trapframe indicates + * ABT mode. + */ + tf->tf_spsr &= ~PSR_MODE; + + /* + * We use a simple heuristic to determine if the double abort + * happened as a result of a kernel or user mode access. + * If the current trapframe is at the top of the kernel stack, + * the fault _must_ have come from user mode. + */ + if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) { + /* + * Kernel mode. We're either about to die a + * spectacular death, or pcb_onfault will come + * to our rescue. Either way, the current value + * of tf->tf_pc is irrelevant. + */ + tf->tf_spsr |= PSR_SVC32_MODE; + if (pcb->pcb_onfault == NULL) + printf("\nKernel mode double abort!\n"); + } else { + /* + * User mode. We've lost the program counter at the + * time of the fault (not that it was accurate anyway; + * it's not called an imprecise fault for nothing). + * About all we can do is copy r14_usr to tf_pc and + * hope for the best. The process is about to get a + * SIGBUS, so it's probably history anyway. + */ + tf->tf_spsr |= PSR_USR32_MODE; + tf->tf_pc = tf->tf_usr_lr; + } + } + + /* FAR is invalid for imprecise exceptions */ + if ((fsr & FAULT_IMPRECISE) != 0) + far = 0; +#endif /* __XSCALE__ */ + + if (pcb->pcb_onfault) { + tf->tf_r0 = EFAULT; + tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; + return (0); + } + + /* + * At this point, if the fault happened in kernel mode, we're toast + */ + if (!TRAP_USERMODE(tf)) + dab_fatal(tf, fsr, far, td, ksig); + + /* Deliver a bus error signal to the process */ + ksig->signb = SIGBUS; + ksig->code = 0; + td->td_frame = tf; + + return (1); +} + +/* + * void prefetch_abort_handler(struct trapframe *tf) + * + * Abort handler called when instruction execution occurs at + * a non existent or restricted (access permissions) memory page. 
+ * If the address is invalid and we were in SVC mode then panic as + * the kernel should never prefetch abort. + * If the address is invalid and the page is mapped then the user process + * does no have read permission so send it a signal. + * Otherwise fault the page in and try again. + */ +static void +prefetch_abort_handler(struct trapframe *tf) +{ + struct thread *td; + struct proc * p; + struct vm_map *map; + vm_offset_t fault_pc, va; + int error = 0; + struct ksig ksig; + + +#if 0 + /* Update vmmeter statistics */ + uvmexp.traps++; +#endif +#if 0 + printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, + (void*)tf->tf_usr_lr); +#endif + + td = curthread; + p = td->td_proc; + PCPU_INC(cnt.v_trap); + + if (TRAP_USERMODE(tf)) { + td->td_frame = tf; + if (td->td_cowgen != td->td_proc->p_cowgen) + thread_cow_update(td); + } + fault_pc = tf->tf_pc; + if (td->td_md.md_spinlock_count == 0) { + if (__predict_true(tf->tf_spsr & PSR_I) == 0) + enable_interrupts(PSR_I); + if (__predict_true(tf->tf_spsr & PSR_F) == 0) + enable_interrupts(PSR_F); + } + + /* Prefetch aborts cannot happen in kernel mode */ + if (__predict_false(!TRAP_USERMODE(tf))) + dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); + td->td_pticks = 0; + + + /* Ok validate the address, can only execute in USER space */ + if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || + (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { + ksig.signb = SIGSEGV; + ksig.code = 0; + goto do_trapsignal; + } + + map = &td->td_proc->p_vmspace->vm_map; + va = trunc_page(fault_pc); + + /* + * See if the pmap can handle this fault on its own... 
+ */ +#ifdef DEBUG + last_fault_code = -1; +#endif + if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) + goto out; + + error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE, + VM_FAULT_NORMAL); + if (__predict_true(error == 0)) + goto out; + + if (error == ENOMEM) { + printf("VM: pid %d (%s), uid %d killed: " + "out of swap\n", td->td_proc->p_pid, td->td_name, + (td->td_proc->p_ucred) ? + td->td_proc->p_ucred->cr_uid : -1); + ksig.signb = SIGKILL; + } else { + ksig.signb = SIGSEGV; + } + ksig.code = 0; + +do_trapsignal: + call_trapsignal(td, ksig.signb, ksig.code); + +out: + userret(td, tf); + +} + +extern int badaddr_read_1(const uint8_t *, uint8_t *); +extern int badaddr_read_2(const uint16_t *, uint16_t *); +extern int badaddr_read_4(const uint32_t *, uint32_t *); +/* + * Tentatively read an 8, 16, or 32-bit value from 'addr'. + * If the read succeeds, the value is written to 'rptr' and zero is returned. + * Else, return EFAULT. + */ +int +badaddr_read(void *addr, size_t size, void *rptr) +{ + union { + uint8_t v1; + uint16_t v2; + uint32_t v4; + } u; + int rv; + + cpu_drain_writebuf(); + + /* Read from the test address. 
*/ + switch (size) { + case sizeof(uint8_t): + rv = badaddr_read_1(addr, &u.v1); + if (rv == 0 && rptr) + *(uint8_t *) rptr = u.v1; + break; + + case sizeof(uint16_t): + rv = badaddr_read_2(addr, &u.v2); + if (rv == 0 && rptr) + *(uint16_t *) rptr = u.v2; + break; + + case sizeof(uint32_t): + rv = badaddr_read_4(addr, &u.v4); + if (rv == 0 && rptr) + *(uint32_t *) rptr = u.v4; + break; + + default: + panic("badaddr: invalid size (%lu)", (u_long) size); + } + + /* Return EFAULT if the address was invalid, else zero */ + return (rv); +} diff --git a/sys/arm/arm/trap.c b/sys/arm/arm/trap.c deleted file mode 100644 index eb4d68304b33..000000000000 --- a/sys/arm/arm/trap.c +++ /dev/null @@ -1,737 +0,0 @@ -/* $NetBSD: fault.c,v 1.45 2003/11/20 14:44:36 scw Exp $ */ - -/*- - * Copyright 2004 Olivier Houchard - * Copyright 2003 Wasabi Systems, Inc. - * All rights reserved. - * - * Written by Steve C. Woodford for Wasabi Systems, Inc. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed for the NetBSD Project by - * Wasabi Systems, Inc. - * 4. The name of Wasabi Systems, Inc. may not be used to endorse - * or promote products derived from this software without specific prior - * written permission. - * - * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. 
``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED - * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL WASABI SYSTEMS, INC - * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR - * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF - * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS - * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN - * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) - * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE - * POSSIBILITY OF SUCH DAMAGE. - */ -/*- - * Copyright (c) 1994-1997 Mark Brinicombe. - * Copyright (c) 1994 Brini. - * All rights reserved. - * - * This code is derived from software written for Brini by Mark Brinicombe - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. All advertising materials mentioning features or use of this software - * must display the following acknowledgement: - * This product includes software developed by Brini. - * 4. The name of the company nor the name of the author may be used to - * endorse or promote products derived from this software without specific - * prior written permission. 
- * - * THIS SOFTWARE IS PROVIDED BY BRINI ``AS IS'' AND ANY EXPRESS OR IMPLIED - * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF - * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. - * IN NO EVENT SHALL BRINI OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, - * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR - * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - * - * RiscBSD kernel project - * - * fault.c - * - * Fault handlers - * - * Created : 28/11/94 - */ - -#include -__FBSDID("$FreeBSD$"); - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#ifdef KDB -#include -#endif - -#ifdef KDTRACE_HOOKS -#include -#endif - -#define ReadWord(a) (*((volatile unsigned int *)(a))) - -#ifdef DEBUG -int last_fault_code; /* For the benefit of pmap_fault_fixup() */ -#endif - -struct ksig { - int signb; - u_long code; -}; -struct data_abort { - int (*func)(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); - const char *desc; -}; - -static int dab_fatal(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static int dab_align(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static int dab_buserr(struct trapframe *, u_int, u_int, struct thread *, - struct ksig *); -static void prefetch_abort_handler(struct trapframe *); - -static const struct data_abort data_aborts[] = { - {dab_fatal, "Vector Exception"}, - {dab_align, "Alignment Fault 1"}, - {dab_fatal, "Terminal Exception"}, - {dab_align, "Alignment Fault 3"}, 
- {dab_buserr, "External Linefetch Abort (S)"}, - {NULL, "Translation Fault (S)"}, -#if (ARM_MMU_V6 + ARM_MMU_V7) != 0 - {NULL, "Translation Flag Fault"}, -#else - {dab_buserr, "External Linefetch Abort (P)"}, -#endif - {NULL, "Translation Fault (P)"}, - {dab_buserr, "External Non-Linefetch Abort (S)"}, - {NULL, "Domain Fault (S)"}, - {dab_buserr, "External Non-Linefetch Abort (P)"}, - {NULL, "Domain Fault (P)"}, - {dab_buserr, "External Translation Abort (L1)"}, - {NULL, "Permission Fault (S)"}, - {dab_buserr, "External Translation Abort (L2)"}, - {NULL, "Permission Fault (P)"} -}; - -/* Determine if a fault came from user mode */ -#define TRAP_USERMODE(tf) ((tf->tf_spsr & PSR_MODE) == PSR_USR32_MODE) - -/* Determine if 'x' is a permission fault */ -#define IS_PERMISSION_FAULT(x) \ - (((1 << ((x) & FAULT_TYPE_MASK)) & \ - ((1 << FAULT_PERM_P) | (1 << FAULT_PERM_S))) != 0) - -static __inline void -call_trapsignal(struct thread *td, int sig, u_long code) -{ - ksiginfo_t ksi; - - ksiginfo_init_trap(&ksi); - ksi.ksi_signo = sig; - ksi.ksi_code = (int)code; - trapsignal(td, &ksi); -} - -void -abort_handler(struct trapframe *tf, int type) -{ - struct vm_map *map; - struct pcb *pcb; - struct thread *td; - u_int user, far, fsr; - vm_prot_t ftype; - void *onfault; - vm_offset_t va; - int error = 0; - struct ksig ksig; - struct proc *p; - - if (type == 1) - return (prefetch_abort_handler(tf)); - - /* Grab FAR/FSR before enabling interrupts */ - far = cpu_faultaddress(); - fsr = cpu_faultstatus(); -#if 0 - printf("data abort: fault address=%p (from pc=%p lr=%p)\n", - (void*)far, (void*)tf->tf_pc, (void*)tf->tf_svc_lr); -#endif - - /* Update vmmeter statistics */ -#if 0 - vmexp.traps++; -#endif - - td = curthread; - p = td->td_proc; - - PCPU_INC(cnt.v_trap); - /* Data abort came from user mode? 
*/ - user = TRAP_USERMODE(tf); - - if (user) { - td->td_pticks = 0; - td->td_frame = tf; - if (td->td_cowgen != td->td_proc->p_cowgen) - thread_cow_update(td); - - } - /* Grab the current pcb */ - pcb = td->td_pcb; - /* Re-enable interrupts if they were enabled previously */ - if (td->td_md.md_spinlock_count == 0) { - if (__predict_true(tf->tf_spsr & PSR_I) == 0) - enable_interrupts(PSR_I); - if (__predict_true(tf->tf_spsr & PSR_F) == 0) - enable_interrupts(PSR_F); - } - - - /* Invoke the appropriate handler, if necessary */ - if (__predict_false(data_aborts[fsr & FAULT_TYPE_MASK].func != NULL)) { - if ((data_aborts[fsr & FAULT_TYPE_MASK].func)(tf, fsr, far, - td, &ksig)) { - goto do_trapsignal; - } - goto out; - } - - /* - * At this point, we're dealing with one of the following data aborts: - * - * FAULT_TRANS_S - Translation -- Section - * FAULT_TRANS_P - Translation -- Page - * FAULT_DOMAIN_S - Domain -- Section - * FAULT_DOMAIN_P - Domain -- Page - * FAULT_PERM_S - Permission -- Section - * FAULT_PERM_P - Permission -- Page - * - * These are the main virtual memory-related faults signalled by - * the MMU. - */ - - /* - * Make sure the Program Counter is sane. We could fall foul of - * someone executing Thumb code, in which case the PC might not - * be word-aligned. This would cause a kernel alignment fault - * further down if we have to decode the current instruction. - * XXX: It would be nice to be able to support Thumb at some point. - */ - if (__predict_false((tf->tf_pc & 3) != 0)) { - if (user) { - /* - * Give the user an illegal instruction signal. - */ - /* Deliver a SIGILL to the process */ - ksig.signb = SIGILL; - ksig.code = 0; - goto do_trapsignal; - } - - /* - * The kernel never executes Thumb code. - */ - printf("\ndata_abort_fault: Misaligned Kernel-mode " - "Program Counter\n"); - dab_fatal(tf, fsr, far, td, &ksig); - } - - va = trunc_page((vm_offset_t)far); - - /* - * It is only a kernel address space fault iff: - * 1. user == 0 and - * 2. 
pcb_onfault not set or - * 3. pcb_onfault set and not LDRT/LDRBT/STRT/STRBT instruction. - */ - if (user == 0 && (va >= VM_MIN_KERNEL_ADDRESS || - (va < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW)) && - __predict_true((pcb->pcb_onfault == NULL || - (ReadWord(tf->tf_pc) & 0x05200000) != 0x04200000))) { - map = kernel_map; - - /* Was the fault due to the FPE/IPKDB ? */ - if (__predict_false((tf->tf_spsr & PSR_MODE)==PSR_UND32_MODE)) { - - /* - * Force exit via userret() - * This is necessary as the FPE is an extension to - * userland that actually runs in a priveledged mode - * but uses USR mode permissions for its accesses. - */ - user = 1; - ksig.signb = SIGSEGV; - ksig.code = 0; - goto do_trapsignal; - } - } else { - map = &td->td_proc->p_vmspace->vm_map; - } - - /* - * We need to know whether the page should be mapped as R or R/W. On - * armv6 and later the fault status register indicates whether the - * access was a read or write. Prior to armv6, we know that a - * permission fault can only be the result of a write to a read-only - * location, so we can deal with those quickly. Otherwise we need to - * disassemble the faulting instruction to determine if it was a write. - */ -#if __ARM_ARCH >= 6 - ftype = (fsr & FAULT_WNR) ? VM_PROT_READ | VM_PROT_WRITE : VM_PROT_READ; -#else - if (IS_PERMISSION_FAULT(fsr)) - ftype = VM_PROT_WRITE; - else { - u_int insn = ReadWord(tf->tf_pc); - - if (((insn & 0x0c100000) == 0x04000000) || /* STR/STRB */ - ((insn & 0x0e1000b0) == 0x000000b0) || /* STRH/STRD */ - ((insn & 0x0a100000) == 0x08000000)) { /* STM/CDT */ - ftype = VM_PROT_WRITE; - } else { - if ((insn & 0x0fb00ff0) == 0x01000090) /* SWP */ - ftype = VM_PROT_READ | VM_PROT_WRITE; - else - ftype = VM_PROT_READ; - } - } -#endif - - /* - * See if the fault is as a result of ref/mod emulation, - * or domain mismatch. 
- */ -#ifdef DEBUG - last_fault_code = fsr; -#endif - if (td->td_critnest != 0 || WITNESS_CHECK(WARN_SLEEPOK | WARN_GIANTOK, - NULL, "Kernel page fault") != 0) - goto fatal_pagefault; - - if (pmap_fault_fixup(vmspace_pmap(td->td_proc->p_vmspace), va, ftype, - user)) { - goto out; - } - - onfault = pcb->pcb_onfault; - pcb->pcb_onfault = NULL; - error = vm_fault(map, va, ftype, VM_FAULT_NORMAL); - pcb->pcb_onfault = onfault; - if (__predict_true(error == 0)) - goto out; -fatal_pagefault: - if (user == 0) { - if (pcb->pcb_onfault) { - tf->tf_r0 = error; - tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; - return; - } - - printf("\nvm_fault(%p, %x, %x, 0) -> %x\n", map, va, ftype, - error); - dab_fatal(tf, fsr, far, td, &ksig); - } - - - if (error == ENOMEM) { - printf("VM: pid %d (%s), uid %d killed: " - "out of swap\n", td->td_proc->p_pid, td->td_name, - (td->td_proc->p_ucred) ? - td->td_proc->p_ucred->cr_uid : -1); - ksig.signb = SIGKILL; - } else { - ksig.signb = SIGSEGV; - } - ksig.code = 0; -do_trapsignal: - call_trapsignal(td, ksig.signb, ksig.code); -out: - /* If returning to user mode, make sure to invoke userret() */ - if (user) - userret(td, tf); -} - -/* - * dab_fatal() handles the following data aborts: - * - * FAULT_WRTBUF_0 - Vector Exception - * FAULT_WRTBUF_1 - Terminal Exception - * - * We should never see these on a properly functioning system. - * - * This function is also called by the other handlers if they - * detect a fatal problem. - * - * Note: If 'l' is NULL, we assume we're dealing with a prefetch abort. - */ -static int -dab_fatal(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - const char *mode; - -#ifdef KDTRACE_HOOKS - if (!TRAP_USERMODE(tf)) { - if (dtrace_trap_func != NULL && (*dtrace_trap_func)(tf, far & FAULT_TYPE_MASK)) - return (0); - } -#endif - - mode = TRAP_USERMODE(tf) ? 
"user" : "kernel"; - - disable_interrupts(PSR_I|PSR_F); - if (td != NULL) { - printf("Fatal %s mode data abort: '%s'\n", mode, - data_aborts[fsr & FAULT_TYPE_MASK].desc); - printf("trapframe: %p\nFSR=%08x, FAR=", tf, fsr); - if ((fsr & FAULT_IMPRECISE) == 0) - printf("%08x, ", far); - else - printf("Invalid, "); - printf("spsr=%08x\n", tf->tf_spsr); - } else { - printf("Fatal %s mode prefetch abort at 0x%08x\n", - mode, tf->tf_pc); - printf("trapframe: %p, spsr=%08x\n", tf, tf->tf_spsr); - } - - printf("r0 =%08x, r1 =%08x, r2 =%08x, r3 =%08x\n", - tf->tf_r0, tf->tf_r1, tf->tf_r2, tf->tf_r3); - printf("r4 =%08x, r5 =%08x, r6 =%08x, r7 =%08x\n", - tf->tf_r4, tf->tf_r5, tf->tf_r6, tf->tf_r7); - printf("r8 =%08x, r9 =%08x, r10=%08x, r11=%08x\n", - tf->tf_r8, tf->tf_r9, tf->tf_r10, tf->tf_r11); - printf("r12=%08x, ", tf->tf_r12); - - if (TRAP_USERMODE(tf)) - printf("usp=%08x, ulr=%08x", - tf->tf_usr_sp, tf->tf_usr_lr); - else - printf("ssp=%08x, slr=%08x", - tf->tf_svc_sp, tf->tf_svc_lr); - printf(", pc =%08x\n\n", tf->tf_pc); - -#ifdef KDB - if (debugger_on_panic || kdb_active) - if (kdb_trap(fsr, 0, tf)) - return (0); -#endif - panic("Fatal abort"); - /*NOTREACHED*/ -} - -/* - * dab_align() handles the following data aborts: - * - * FAULT_ALIGN_0 - Alignment fault - * FAULT_ALIGN_1 - Alignment fault - * - * These faults are fatal if they happen in kernel mode. Otherwise, we - * deliver a bus error to the process. 
- */ -static int -dab_align(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - - /* Alignment faults are always fatal if they occur in kernel mode */ - if (!TRAP_USERMODE(tf)) { - if (!td || !td->td_pcb->pcb_onfault) - dab_fatal(tf, fsr, far, td, ksig); - tf->tf_r0 = EFAULT; - tf->tf_pc = (int)td->td_pcb->pcb_onfault; - return (0); - } - - /* pcb_onfault *must* be NULL at this point */ - - /* Deliver a bus error signal to the process */ - ksig->code = 0; - ksig->signb = SIGBUS; - td->td_frame = tf; - - return (1); -} - -/* - * dab_buserr() handles the following data aborts: - * - * FAULT_BUSERR_0 - External Abort on Linefetch -- Section - * FAULT_BUSERR_1 - External Abort on Linefetch -- Page - * FAULT_BUSERR_2 - External Abort on Non-linefetch -- Section - * FAULT_BUSERR_3 - External Abort on Non-linefetch -- Page - * FAULT_BUSTRNL1 - External abort on Translation -- Level 1 - * FAULT_BUSTRNL2 - External abort on Translation -- Level 2 - * - * If pcb_onfault is set, flag the fault and return to the handler. - * If the fault occurred in user mode, give the process a SIGBUS. - * - * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 - * can be flagged as imprecise in the FSR. This causes a real headache - * since some of the machine state is lost. In this case, tf->tf_pc - * may not actually point to the offending instruction. In fact, if - * we've taken a double abort fault, it generally points somewhere near - * the top of "data_abort_entry" in exception.S. - * - * In all other cases, these data aborts are considered fatal. - */ -static int -dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct thread *td, - struct ksig *ksig) -{ - struct pcb *pcb = td->td_pcb; - -#ifdef __XSCALE__ - if ((fsr & FAULT_IMPRECISE) != 0 && - (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { - /* - * Oops, an imprecise, double abort fault. 
We've lost the - * r14_abt/spsr_abt values corresponding to the original - * abort, and the spsr saved in the trapframe indicates - * ABT mode. - */ - tf->tf_spsr &= ~PSR_MODE; - - /* - * We use a simple heuristic to determine if the double abort - * happened as a result of a kernel or user mode access. - * If the current trapframe is at the top of the kernel stack, - * the fault _must_ have come from user mode. - */ - if (tf != ((struct trapframe *)pcb->pcb_regs.sf_sp) - 1) { - /* - * Kernel mode. We're either about to die a - * spectacular death, or pcb_onfault will come - * to our rescue. Either way, the current value - * of tf->tf_pc is irrelevant. - */ - tf->tf_spsr |= PSR_SVC32_MODE; - if (pcb->pcb_onfault == NULL) - printf("\nKernel mode double abort!\n"); - } else { - /* - * User mode. We've lost the program counter at the - * time of the fault (not that it was accurate anyway; - * it's not called an imprecise fault for nothing). - * About all we can do is copy r14_usr to tf_pc and - * hope for the best. The process is about to get a - * SIGBUS, so it's probably history anyway. - */ - tf->tf_spsr |= PSR_USR32_MODE; - tf->tf_pc = tf->tf_usr_lr; - } - } - - /* FAR is invalid for imprecise exceptions */ - if ((fsr & FAULT_IMPRECISE) != 0) - far = 0; -#endif /* __XSCALE__ */ - - if (pcb->pcb_onfault) { - tf->tf_r0 = EFAULT; - tf->tf_pc = (register_t)(intptr_t) pcb->pcb_onfault; - return (0); - } - - /* - * At this point, if the fault happened in kernel mode, we're toast - */ - if (!TRAP_USERMODE(tf)) - dab_fatal(tf, fsr, far, td, ksig); - - /* Deliver a bus error signal to the process */ - ksig->signb = SIGBUS; - ksig->code = 0; - td->td_frame = tf; - - return (1); -} - -/* - * void prefetch_abort_handler(struct trapframe *tf) - * - * Abort handler called when instruction execution occurs at - * a non existent or restricted (access permissions) memory page. 
- * If the address is invalid and we were in SVC mode then panic as - * the kernel should never prefetch abort. - * If the address is invalid and the page is mapped then the user process - * does no have read permission so send it a signal. - * Otherwise fault the page in and try again. - */ -static void -prefetch_abort_handler(struct trapframe *tf) -{ - struct thread *td; - struct proc * p; - struct vm_map *map; - vm_offset_t fault_pc, va; - int error = 0; - struct ksig ksig; - - -#if 0 - /* Update vmmeter statistics */ - uvmexp.traps++; -#endif -#if 0 - printf("prefetch abort handler: %p %p\n", (void*)tf->tf_pc, - (void*)tf->tf_usr_lr); -#endif - - td = curthread; - p = td->td_proc; - PCPU_INC(cnt.v_trap); - - if (TRAP_USERMODE(tf)) { - td->td_frame = tf; - if (td->td_cowgen != td->td_proc->p_cowgen) - thread_cow_update(td); - } - fault_pc = tf->tf_pc; - if (td->td_md.md_spinlock_count == 0) { - if (__predict_true(tf->tf_spsr & PSR_I) == 0) - enable_interrupts(PSR_I); - if (__predict_true(tf->tf_spsr & PSR_F) == 0) - enable_interrupts(PSR_F); - } - - /* Prefetch aborts cannot happen in kernel mode */ - if (__predict_false(!TRAP_USERMODE(tf))) - dab_fatal(tf, 0, tf->tf_pc, NULL, &ksig); - td->td_pticks = 0; - - - /* Ok validate the address, can only execute in USER space */ - if (__predict_false(fault_pc >= VM_MAXUSER_ADDRESS || - (fault_pc < VM_MIN_ADDRESS && vector_page == ARM_VECTORS_LOW))) { - ksig.signb = SIGSEGV; - ksig.code = 0; - goto do_trapsignal; - } - - map = &td->td_proc->p_vmspace->vm_map; - va = trunc_page(fault_pc); - - /* - * See if the pmap can handle this fault on its own... 
- */ -#ifdef DEBUG - last_fault_code = -1; -#endif - if (pmap_fault_fixup(map->pmap, va, VM_PROT_READ, 1)) - goto out; - - error = vm_fault(map, va, VM_PROT_READ | VM_PROT_EXECUTE, - VM_FAULT_NORMAL); - if (__predict_true(error == 0)) - goto out; - - if (error == ENOMEM) { - printf("VM: pid %d (%s), uid %d killed: " - "out of swap\n", td->td_proc->p_pid, td->td_name, - (td->td_proc->p_ucred) ? - td->td_proc->p_ucred->cr_uid : -1); - ksig.signb = SIGKILL; - } else { - ksig.signb = SIGSEGV; - } - ksig.code = 0; - -do_trapsignal: - call_trapsignal(td, ksig.signb, ksig.code); - -out: - userret(td, tf); - -} - -extern int badaddr_read_1(const uint8_t *, uint8_t *); -extern int badaddr_read_2(const uint16_t *, uint16_t *); -extern int badaddr_read_4(const uint32_t *, uint32_t *); -/* - * Tentatively read an 8, 16, or 32-bit value from 'addr'. - * If the read succeeds, the value is written to 'rptr' and zero is returned. - * Else, return EFAULT. - */ -int -badaddr_read(void *addr, size_t size, void *rptr) -{ - union { - uint8_t v1; - uint16_t v2; - uint32_t v4; - } u; - int rv; - - cpu_drain_writebuf(); - - /* Read from the test address. 
*/ - switch (size) { - case sizeof(uint8_t): - rv = badaddr_read_1(addr, &u.v1); - if (rv == 0 && rptr) - *(uint8_t *) rptr = u.v1; - break; - - case sizeof(uint16_t): - rv = badaddr_read_2(addr, &u.v2); - if (rv == 0 && rptr) - *(uint16_t *) rptr = u.v2; - break; - - case sizeof(uint32_t): - rv = badaddr_read_4(addr, &u.v4); - if (rv == 0 && rptr) - *(uint32_t *) rptr = u.v4; - break; - - default: - panic("badaddr: invalid size (%lu)", (u_long) size); - } - - /* Return EFAULT if the address was invalid, else zero */ - return (rv); -} diff --git a/sys/conf/files.arm b/sys/conf/files.arm index ac8b4e8914e6..d142450962c2 100644 --- a/sys/conf/files.arm +++ b/sys/conf/files.arm @@ -6,7 +6,7 @@ arm/arm/blockio.S standard arm/arm/bus_space_asm_generic.S standard arm/arm/bus_space_base.c optional fdt arm/arm/bus_space_generic.c standard -arm/arm/busdma_machdep.c optional !armv6 +arm/arm/busdma_machdep-v4.c optional !armv6 arm/arm/busdma_machdep-v6.c optional armv6 arm/arm/copystr.S standard arm/arm/cpufunc.c standard @@ -62,7 +62,7 @@ arm/arm/pl190.c optional pl190 arm/arm/pl310.c optional pl310 arm/arm/platform.c optional platform arm/arm/platform_if.m optional platform -arm/arm/pmap.c optional !armv6 +arm/arm/pmap-v4.c optional !armv6 arm/arm/pmap-v6.c optional armv6 arm/arm/pmu.c optional pmu | fdt hwpmc arm/arm/sc_machdep.c optional sc @@ -77,7 +77,7 @@ arm/arm/swtch-v4.S optional !armv6 arm/arm/swtch-v6.S optional armv6 arm/arm/sys_machdep.c standard arm/arm/syscall.c standard -arm/arm/trap.c optional !armv6 +arm/arm/trap-v4.c optional !armv6 arm/arm/trap-v6.c optional armv6 arm/arm/uio_machdep.c standard arm/arm/undefined.c standard -- cgit v1.2.3 From 6fe2e1762eb33d9386b9072e7897911cc3cc844b Mon Sep 17 00:00:00 2001 From: Kristof Provost Date: Sat, 20 Feb 2016 11:36:35 +0000 Subject: ifconfig(8): can't use 'name' or 'description' when creating interface with auto numbering If one does 'ifconfig tap create name blah', it will return error because the 'name' command 
doesn't properly populate the request sent to ioctl(...). The 'description' command has the same bug, and is also fixed with this patch. If one does 'ifconfig tap create mtu 9000 name blah', it DOES work, but 'tap0' (or other sequence number) is echoed, instead of the expected 'blah'. (assuming the name change actually succeeded) Submitted by: Marie Helene Kvello-Aune Differential Revision: https://reviews.freebsd.org/D5341 --- sbin/ifconfig/ifclone.c | 5 +++-- sbin/ifconfig/ifconfig.c | 20 ++++++++++++++++++++ sbin/ifconfig/ifconfig.h | 1 + 3 files changed, 24 insertions(+), 2 deletions(-) diff --git a/sbin/ifconfig/ifclone.c b/sbin/ifconfig/ifclone.c index 6a60d419f0fb..0eda4380d51e 100644 --- a/sbin/ifconfig/ifclone.c +++ b/sbin/ifconfig/ifclone.c @@ -144,11 +144,12 @@ ifclonecreate(int s, void *arg) } /* - * If we get a different name back than we put in, print it. + * If we get a different name back than we put in, update record and + * indicate it should be printed later. */ if (strncmp(name, ifr.ifr_name, sizeof(name)) != 0) { strlcpy(name, ifr.ifr_name, sizeof(name)); - printf("%s\n", name); + printifname = 1; } } diff --git a/sbin/ifconfig/ifconfig.c b/sbin/ifconfig/ifconfig.c index 0018b5cfcb02..a475139584e4 100644 --- a/sbin/ifconfig/ifconfig.c +++ b/sbin/ifconfig/ifconfig.c @@ -93,6 +93,7 @@ int clearaddr; int newaddr = 1; int verbose; int noload; +int printifname = 0; int supmedia = 0; int printkeys = 0; /* Print keying material for interfaces. 
*/ @@ -108,6 +109,8 @@ static struct afswtch *af_getbyname(const char *name); static struct afswtch *af_getbyfamily(int af); static void af_other_status(int); +void printifnamemaybe(void); + static struct option *opts = NULL; struct ifa_order_elt { @@ -297,6 +300,12 @@ sortifaddrs(struct ifaddrs *list, return (result); } +void printifnamemaybe() +{ + if (printifname) + printf("%s\n", name); +} + int main(int argc, char *argv[]) { @@ -314,6 +323,12 @@ main(int argc, char *argv[]) size_t iflen; all = downonly = uponly = namesonly = noload = verbose = 0; + + /* + * Ensure we print interface name when expected to, + * even if we terminate early due to error. + */ + atexit(printifnamemaybe); /* Parse leading line options */ strlcpy(options, "adklmnuv", sizeof(options)); @@ -1011,6 +1026,8 @@ setifname(const char *val, int dummy __unused, int s, const struct afswtch *afp) { char *newname; + + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); newname = strdup(val); if (newname == NULL) @@ -1020,6 +1037,7 @@ setifname(const char *val, int dummy __unused, int s, free(newname); err(1, "ioctl SIOCSIFNAME (set name)"); } + printifname = 1; strlcpy(name, newname, sizeof(name)); free(newname); } @@ -1031,6 +1049,8 @@ setifdescr(const char *val, int dummy __unused, int s, { char *newdescr; + strncpy(ifr.ifr_name, name, sizeof(ifr.ifr_name)); + ifr.ifr_buffer.length = strlen(val) + 1; if (ifr.ifr_buffer.length == 1) { ifr.ifr_buffer.buffer = newdescr = NULL; diff --git a/sbin/ifconfig/ifconfig.h b/sbin/ifconfig/ifconfig.h index 6df9acf97313..ada224f67052 100644 --- a/sbin/ifconfig/ifconfig.h +++ b/sbin/ifconfig/ifconfig.h @@ -133,6 +133,7 @@ extern int supmedia; extern int printkeys; extern int newaddr; extern int verbose; +extern int printifname; void setifcap(const char *, int value, int s, const struct afswtch *); -- cgit v1.2.3 From 36e9c2cef0d59d4f4e466b558eaaab0b76ad3603 Mon Sep 17 00:00:00 2001 From: Zbigniew Bodek Date: Sat, 20 Feb 2016 12:28:20 +0000 Subject: Revert 
r295756: Extract common code from PowerPC's ofw_pci Import portions of the PowerPC OF PCI implementation into new file "ofw_pci.c", common for other platforms. The files ofw_pci.c and ofw_pci.h from sys/powerpc/ofw no longer exist. All required declarations are moved to sys/dev/ofw/ofw_pci.h. This creates a new ofw_pci_write_ivar() function and modifies ofw_pci_nranges(), ofw_pci_read_ivar(), ofw_pci_route_interrupt() methods. Most functions contain existing ppc implementations in the majority unchanged. Now there is no need to have multiple identical copies of methods for various architectures. Submitted by: Marcin Mazurek Obtained from: Semihalf Sponsored by: Annapurna Labs Reviewed by: jhibbits, mmel Differential Revision: https://reviews.freebsd.org/D4879 This needs to return to the drawing board as it breaks both PowerPC and Sparc64 build. Pointed out by: jhibbits --- sys/conf/files | 1 - sys/dev/ofw/ofw_pci.c | 622 ------------------------------------- sys/dev/ofw/ofw_pci.h | 57 +--- sys/dev/ofw/ofw_subr.c | 3 +- sys/powerpc/mpc85xx/pci_mpc85xx.c | 4 +- sys/powerpc/powermac/cpcht.c | 3 +- sys/powerpc/powermac/grackle.c | 3 +- sys/powerpc/powermac/uninorthpci.c | 3 +- sys/powerpc/powermac/uninorthvar.h | 1 + sys/powerpc/pseries/rtas_pci.c | 3 +- 10 files changed, 16 insertions(+), 684 deletions(-) delete mode 100644 sys/dev/ofw/ofw_pci.c diff --git a/sys/conf/files b/sys/conf/files index 6ecc83c8d792..bbf971319e09 100644 --- a/sys/conf/files +++ b/sys/conf/files @@ -2108,7 +2108,6 @@ dev/ofw/ofw_subr.c optional fdt dev/ofw/ofwbus.c optional fdt dev/ofw/openfirm.c optional fdt dev/ofw/openfirmio.c optional fdt -dev/ofw/ofw_pci.c optional fdt pci dev/ow/ow.c optional ow \ dependency "owll_if.h" \ dependency "own_if.h" diff --git a/sys/dev/ofw/ofw_pci.c b/sys/dev/ofw/ofw_pci.c deleted file mode 100644 index 72958db5d22d..000000000000 --- a/sys/dev/ofw/ofw_pci.c +++ /dev/null @@ -1,622 +0,0 @@ -/*- - * Copyright (c) 2011 Nathan Whitehorn - * All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -#include -__FBSDID("$FreeBSD$"); -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include - -#include -#include - -#include -#include -#include - -#include -#include - -#include "pcib_if.h" - -/* - * If it is necessary to set another value of this for - * some platforms it should be set at fdt.h file - */ -#ifndef PCI_MAP_INTR -#define PCI_MAP_INTR 4 -#endif - -#define PCI_INTR_PINS 4 - -/* - * bus interface. 
- */ -static struct resource * ofw_pci_alloc_resource(device_t, device_t, - int, int *, u_long, u_long, u_long, u_int); -static int ofw_pci_release_resource(device_t, device_t, int, int, - struct resource *); -static int ofw_pci_activate_resource(device_t, device_t, int, int, - struct resource *); -static int ofw_pci_deactivate_resource(device_t, device_t, int, int, - struct resource *); -static int ofw_pci_adjust_resource(device_t, device_t, int, - struct resource *, u_long, u_long); - -/* - * pcib interface - */ -static int ofw_pci_maxslots(device_t); - -/* - * ofw_bus interface - */ -static phandle_t ofw_pci_get_node(device_t, device_t); - -/* - * local methods - */ -static int ofw_pci_fill_ranges(phandle_t, struct ofw_pci_range *); - -/* - * Driver methods. - */ -static device_method_t ofw_pci_methods[] = { - - /* Device interface */ - DEVMETHOD(device_attach, ofw_pci_attach), - - /* Bus interface */ - DEVMETHOD(bus_print_child, bus_generic_print_child), - DEVMETHOD(bus_read_ivar, ofw_pci_read_ivar), - DEVMETHOD(bus_write_ivar, ofw_pci_write_ivar), - DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), - DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), - DEVMETHOD(bus_alloc_resource, ofw_pci_alloc_resource), - DEVMETHOD(bus_release_resource, ofw_pci_release_resource), - DEVMETHOD(bus_activate_resource, ofw_pci_activate_resource), - DEVMETHOD(bus_deactivate_resource, ofw_pci_deactivate_resource), - DEVMETHOD(bus_adjust_resource, ofw_pci_adjust_resource), -#ifdef __powerpc__ - DEVMETHOD(bus_get_bus_tag, ofw_pci_bus_get_bus_tag), -#endif - - /* pcib interface */ - DEVMETHOD(pcib_maxslots, ofw_pci_maxslots), - DEVMETHOD(pcib_route_interrupt, ofw_pci_route_interrupt), - - /* ofw_bus interface */ - DEVMETHOD(ofw_bus_get_node, ofw_pci_get_node), - - DEVMETHOD_END -}; - -DEFINE_CLASS_0(ofw_pci, ofw_pci_driver, ofw_pci_methods, 0); - -int -ofw_pci_init(device_t dev) -{ - struct ofw_pci_softc *sc; - phandle_t node; - u_int32_t busrange[2]; - struct ofw_pci_range 
*rp; - int error; - struct ofw_pci_cell_info *cell_info; - - node = ofw_bus_get_node(dev); - sc = device_get_softc(dev); - sc->sc_initialized = 1; - sc->sc_range = NULL; - - cell_info = (struct ofw_pci_cell_info *)malloc(sizeof(*cell_info), - M_DEVBUF, M_WAITOK | M_ZERO); - - sc->sc_cell_info = cell_info; - - if (OF_getencprop(node, "bus-range", busrange, sizeof(busrange)) != 8) - busrange[0] = 0; - - sc->sc_dev = dev; - sc->sc_node = node; - sc->sc_bus = busrange[0]; - - if (sc->sc_quirks & OFW_PCI_QUIRK_RANGES_ON_CHILDREN) { - phandle_t c; - int n, i; - - sc->sc_nrange = 0; - for (c = OF_child(node); c != 0; c = OF_peer(c)) { - n = ofw_pci_nranges(c, cell_info); - if (n > 0) - sc->sc_nrange += n; - } - if (sc->sc_nrange == 0) { - error = ENXIO; - goto out; - } - sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), - M_DEVBUF, M_WAITOK); - i = 0; - for (c = OF_child(node); c != 0; c = OF_peer(c)) { - n = ofw_pci_fill_ranges(c, &sc->sc_range[i]); - if (n > 0) - i += n; - } - KASSERT(i == sc->sc_nrange, ("range count mismatch")); - } else { - sc->sc_nrange = ofw_pci_nranges(node, cell_info); - if (sc->sc_nrange <= 0) { - device_printf(dev, "could not getranges\n"); - error = ENXIO; - goto out; - } - sc->sc_range = malloc(sc->sc_nrange * sizeof(sc->sc_range[0]), - M_DEVBUF, M_WAITOK); - ofw_pci_fill_ranges(node, sc->sc_range); - } - - sc->sc_io_rman.rm_type = RMAN_ARRAY; - sc->sc_io_rman.rm_descr = "PCI I/O Ports"; - error = rman_init(&sc->sc_io_rman); - if (error) { - device_printf(dev, "rman_init() failed. error = %d\n", error); - goto out; - } - - sc->sc_mem_rman.rm_type = RMAN_ARRAY; - sc->sc_mem_rman.rm_descr = "PCI Memory"; - error = rman_init(&sc->sc_mem_rman); - if (error) { - device_printf(dev, "rman_init() failed. 
error = %d\n", error); - goto out; - } - - for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && - rp->pci_hi != 0; rp++) { - error = 0; - - switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { - case OFW_PCI_PHYS_HI_SPACE_CONFIG: - break; - case OFW_PCI_PHYS_HI_SPACE_IO: - error = rman_manage_region(&sc->sc_io_rman, rp->pci, - rp->pci + rp->size - 1); - break; - case OFW_PCI_PHYS_HI_SPACE_MEM32: - case OFW_PCI_PHYS_HI_SPACE_MEM64: - error = rman_manage_region(&sc->sc_mem_rman, rp->pci, - rp->pci + rp->size - 1); - break; - } - - if (error) { - device_printf(dev, - "rman_manage_region(%x, %#jx, %#jx) failed. " - "error = %d\n", rp->pci_hi & - OFW_PCI_PHYS_HI_SPACEMASK, rp->pci, - rp->pci + rp->size - 1, error); - goto out; - } - } - - ofw_bus_setup_iinfo(node, &sc->sc_pci_iinfo, sizeof(cell_t)); - -out: - free(cell_info, M_DEVBUF); - free(sc->sc_range, M_DEVBUF); - rman_fini(&sc->sc_io_rman); - rman_fini(&sc->sc_mem_rman); - - return (error); -} - -int -ofw_pci_attach(device_t dev) -{ - struct ofw_pci_softc *sc; - int error; - - sc = device_get_softc(dev); - if (!sc->sc_initialized) { - error = ofw_pci_init(dev); - if (error) - return (error); - } - - device_add_child(dev, "pci", -1); - return (bus_generic_attach(dev)); -} - -static int -ofw_pci_maxslots(device_t dev) -{ - - return (PCI_SLOTMAX); -} - -int -ofw_pci_route_interrupt(device_t bus, device_t dev, int pin) -{ - struct ofw_pci_softc *sc; - struct ofw_pci_register reg; - uint32_t pintr, mintr[PCI_MAP_INTR]; - int intrcells; - phandle_t iparent; - - sc = device_get_softc(bus); - pintr = pin; - - /* Fabricate imap information in case this isn't an OFW device */ - bzero(®, sizeof(reg)); - reg.phys_hi = (pci_get_bus(dev) << OFW_PCI_PHYS_HI_BUSSHIFT) | - (pci_get_slot(dev) << OFW_PCI_PHYS_HI_DEVICESHIFT) | - (pci_get_function(dev) << OFW_PCI_PHYS_HI_FUNCTIONSHIFT); - - intrcells = ofw_bus_lookup_imap(ofw_bus_get_node(dev), - &sc->sc_pci_iinfo, ®, sizeof(reg), &pintr, sizeof(pintr), - mintr, sizeof(mintr), 
&iparent); - if (intrcells != 0) { - pintr = ofw_bus_map_intr(dev, iparent, intrcells, mintr); - return (pintr); - } - - /* - * Maybe it's a real interrupt, not an intpin - */ - if (pin > PCI_INTR_PINS) - return (pin); - - device_printf(bus, "could not route pin %d for device %d.%d\n", - pin, pci_get_slot(dev), pci_get_function(dev)); - return (PCI_INVALID_IRQ); -} - -int -ofw_pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result) -{ - struct ofw_pci_softc *sc; - - sc = device_get_softc(dev); - - switch (which) { - case PCIB_IVAR_DOMAIN: - *result = device_get_unit(dev); - return (0); - case PCIB_IVAR_BUS: - *result = sc->sc_bus; - return (0); - default: - break; - } - - return (ENOENT); -} - -int -ofw_pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value) -{ - struct ofw_pci_softc *sc; - - sc = device_get_softc(dev); - - switch (which) { - case PCIB_IVAR_BUS: - sc->sc_bus = value; - return (0); - default: - break; - } - - return (ENOENT); -} - -int -ofw_pci_nranges(phandle_t node, struct ofw_pci_cell_info *info) -{ - ssize_t nbase_ranges; - - if (info == NULL) - return (-1); - - info->host_address_cells = 1; - info->size_cells = 2; - info->pci_address_cell = 3; - - OF_getencprop(OF_parent(node), "#address-cells", - &(info->host_address_cells), sizeof(info->host_address_cells)); - OF_getencprop(node, "#address-cells", - &(info->pci_address_cell), sizeof(info->pci_address_cell)); - OF_getencprop(node, "#size-cells", &(info->size_cells), - sizeof(info->size_cells)); - - nbase_ranges = OF_getproplen(node, "ranges"); - if (nbase_ranges <= 0) - return (-1); - - return (nbase_ranges / sizeof(cell_t) / - (info->pci_address_cell + info->host_address_cells + - info->size_cells)); -} - -static struct resource * -ofw_pci_alloc_resource(device_t bus, device_t child, int type, int *rid, - rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) -{ - struct ofw_pci_softc *sc; - struct resource *rv; - struct rman *rm; - int 
needactivate; - - needactivate = flags & RF_ACTIVE; - flags &= ~RF_ACTIVE; - - sc = device_get_softc(bus); - - switch (type) { - case SYS_RES_MEMORY: - rm = &sc->sc_mem_rman; - break; - - case SYS_RES_IOPORT: - rm = &sc->sc_io_rman; - break; - - case SYS_RES_IRQ: - return (bus_alloc_resource(bus, type, rid, start, end, count, - flags)); - - default: - device_printf(bus, "unknown resource request from %s\n", - device_get_nameunit(child)); - return (NULL); - } - - rv = rman_reserve_resource(rm, start, end, count, flags, child); - if (rv == NULL) { - device_printf(bus, "failed to reserve resource for %s\n", - device_get_nameunit(child)); - return (NULL); - } - - rman_set_rid(rv, *rid); - - if (needactivate) { - if (bus_activate_resource(child, type, *rid, rv) != 0) { - device_printf(bus, - "failed to activate resource for %s\n", - device_get_nameunit(child)); - rman_release_resource(rv); - return (NULL); - } - } - - return (rv); -} - -static int -ofw_pci_release_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) -{ - - if (rman_get_flags(res) & RF_ACTIVE) { - int error = bus_deactivate_resource(child, type, rid, res); - if (error) - return error; - } - - return (rman_release_resource(res)); -} - -static int -ofw_pci_activate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) -{ - struct ofw_pci_softc *sc; - bus_space_handle_t handle; - bus_space_tag_t tag; - int rv; - - sc = device_get_softc(bus); - - if (type == SYS_RES_IRQ) { - return (bus_activate_resource(bus, type, rid, res)); - } - if (type == SYS_RES_MEMORY || type == SYS_RES_IOPORT) { - struct ofw_pci_range *rp; - vm_offset_t start; - int space; - - start = (vm_offset_t)rman_get_start(res); - - /* - * Map this through the ranges list - */ - for (rp = sc->sc_range; rp < sc->sc_range + sc->sc_nrange && - rp->pci_hi != 0; rp++) { - if (start < rp->pci || start >= rp->pci + rp->size) - continue; - - switch (rp->pci_hi & OFW_PCI_PHYS_HI_SPACEMASK) { - 
case OFW_PCI_PHYS_HI_SPACE_IO: - space = SYS_RES_IOPORT; - break; - case OFW_PCI_PHYS_HI_SPACE_MEM32: - case OFW_PCI_PHYS_HI_SPACE_MEM64: - space = SYS_RES_MEMORY; - break; - default: - space = -1; - } - - if (type == space) { - start += (rp->host - rp->pci); - break; - } - } - - if (bootverbose) - printf("ofw_pci mapdev: start %zx, len %ld\n", start, - rman_get_size(res)); - - tag = BUS_GET_BUS_TAG(child, child); - if (tag == NULL) - return (ENOMEM); - - rman_set_bustag(res, tag); - rv = bus_space_map(tag, start, - rman_get_size(res), 0, &handle); - if (rv != 0) - return (ENOMEM); - - rman_set_bushandle(res, handle); - rman_set_virtual(res, (void *)handle); /* XXX for powerpc only ? */ - } - - return (rman_activate_resource(res)); -} - -#ifdef __powerpc__ -static bus_space_tag_t -ofw_pci_bus_get_bus_tag(device_t bus, device_t child) -{ - - return (&bs_le_tag) -} -#endif - -static int -ofw_pci_deactivate_resource(device_t bus, device_t child, int type, int rid, - struct resource *res) -{ - - /* - * If this is a memory resource, unmap it. 
- */ - if ((type == SYS_RES_MEMORY) || (type == SYS_RES_IOPORT)) { - u_int32_t psize; - - psize = rman_get_size(res); - pmap_unmapdev((vm_offset_t)rman_get_virtual(res), psize); - } - - return (rman_deactivate_resource(res)); -} - -static int -ofw_pci_adjust_resource(device_t bus, device_t child, int type, - struct resource *res, rman_res_t start, rman_res_t end) -{ - struct rman *rm = NULL; - struct ofw_pci_softc *sc = device_get_softc(bus); - - KASSERT(!(rman_get_flags(res) & RF_ACTIVE), - ("active resources cannot be adjusted")); - if (rman_get_flags(res) & RF_ACTIVE) - return (EINVAL); - - switch (type) { - case SYS_RES_MEMORY: - rm = &sc->sc_mem_rman; - break; - case SYS_RES_IOPORT: - rm = &sc->sc_io_rman; - break; - default: - return (ENXIO); - } - - if (!rman_is_region_manager(res, rm)) - return (EINVAL); - - return (rman_adjust_resource(res, start, end)); -} - -static phandle_t -ofw_pci_get_node(device_t bus, device_t dev) -{ - struct ofw_pci_softc *sc; - - sc = device_get_softc(bus); - /* We only have one child, the PCI bus, which needs our own node. 
*/ - - return (sc->sc_node); -} - -static int -ofw_pci_fill_ranges(phandle_t node, struct ofw_pci_range *ranges) -{ - int host_address_cells = 1, pci_address_cells = 3, size_cells = 2; - cell_t *base_ranges; - ssize_t nbase_ranges; - int nranges; - int i, j, k; - - OF_getencprop(OF_parent(node), "#address-cells", &host_address_cells, - sizeof(host_address_cells)); - OF_getencprop(node, "#address-cells", &pci_address_cells, - sizeof(pci_address_cells)); - OF_getencprop(node, "#size-cells", &size_cells, sizeof(size_cells)); - - nbase_ranges = OF_getproplen(node, "ranges"); - if (nbase_ranges <= 0) - return (-1); - nranges = nbase_ranges / sizeof(cell_t) / - (pci_address_cells + host_address_cells + size_cells); - - base_ranges = malloc(nbase_ranges, M_DEVBUF, M_WAITOK); - OF_getencprop(node, "ranges", base_ranges, nbase_ranges); - - for (i = 0, j = 0; i < nranges; i++) { - ranges[i].pci_hi = base_ranges[j++]; - ranges[i].pci = 0; - for (k = 0; k < pci_address_cells - 1; k++) { - ranges[i].pci <<= 32; - ranges[i].pci |= base_ranges[j++]; - } - ranges[i].host = 0; - for (k = 0; k < host_address_cells; k++) { - ranges[i].host <<= 32; - ranges[i].host |= base_ranges[j++]; - } - ranges[i].size = 0; - for (k = 0; k < size_cells; k++) { - ranges[i].size <<= 32; - ranges[i].size |= base_ranges[j++]; - } - } - - free(base_ranges, M_DEVBUF); - return (nranges); -} diff --git a/sys/dev/ofw/ofw_pci.h b/sys/dev/ofw/ofw_pci.h index 424b5f13ca33..eb60c5baee0b 100644 --- a/sys/dev/ofw/ofw_pci.h +++ b/sys/dev/ofw/ofw_pci.h @@ -82,18 +82,13 @@ #define OFW_PCI_PHYS_HI_SPACE_MEM32 0x02000000 #define OFW_PCI_PHYS_HI_SPACE_MEM64 0x03000000 -#define OFW_PCI_PHYS_HI_BUS(hi) \ +#define OFW_PCI_PHYS_HI_BUS(hi) \ (((hi) & OFW_PCI_PHYS_HI_BUSMASK) >> OFW_PCI_PHYS_HI_BUSSHIFT) -#define OFW_PCI_PHYS_HI_DEVICE(hi) \ +#define OFW_PCI_PHYS_HI_DEVICE(hi) \ (((hi) & OFW_PCI_PHYS_HI_DEVICEMASK) >> OFW_PCI_PHYS_HI_DEVICESHIFT) -#define OFW_PCI_PHYS_HI_FUNCTION(hi) \ +#define OFW_PCI_PHYS_HI_FUNCTION(hi) 
\ (((hi) & OFW_PCI_PHYS_HI_FUNCTIONMASK) >> OFW_PCI_PHYS_HI_FUNCTIONSHIFT) -/* - * Export class definition for inheritance purposes - */ -DECLARE_CLASS(ofw_pci_driver); - /* * This has the 3 32bit cell values, plus 2 more to make up a 64-bit size. */ @@ -105,50 +100,4 @@ struct ofw_pci_register { u_int32_t size_lo; }; -struct ofw_pci_cell_info { - pcell_t host_address_cells; - pcell_t pci_address_cell; - pcell_t size_cells; - }; - -struct ofw_pci_range { - uint32_t pci_hi; - uint64_t pci; - uint64_t host; - uint64_t size; -}; - -/* - * Quirks for some adapters - */ -enum { - OFW_PCI_QUIRK_RANGES_ON_CHILDREN = 1, -}; - -struct ofw_pci_softc { - device_t sc_dev; - phandle_t sc_node; - int sc_bus; - int sc_initialized; - int sc_quirks; - - struct ofw_pci_range *sc_range; - int sc_nrange; - struct ofw_pci_cell_info *sc_cell_info; - - struct rman sc_io_rman; - struct rman sc_mem_rman; - bus_space_tag_t sc_memt; - bus_dma_tag_t sc_dmat; - - struct ofw_bus_iinfo sc_pci_iinfo; -}; - -int ofw_pci_init(device_t); -int ofw_pci_attach(device_t); -int ofw_pci_read_ivar(device_t, device_t, int, uintptr_t *); -int ofw_pci_write_ivar(device_t, device_t, int, uintptr_t); -int ofw_pci_route_interrupt(device_t, device_t, int); -int ofw_pci_nranges(phandle_t, struct ofw_pci_cell_info *); - #endif /* _DEV_OFW_OFW_PCI_H_ */ diff --git a/sys/dev/ofw/ofw_subr.c b/sys/dev/ofw/ofw_subr.c index e9b66c284bca..4d14db798f38 100644 --- a/sys/dev/ofw/ofw_subr.c +++ b/sys/dev/ofw/ofw_subr.c @@ -39,9 +39,8 @@ __FBSDID("$FreeBSD$"); #include #include -#include -#include #include +#include static void get_addr_props(phandle_t node, uint32_t *addrp, uint32_t *sizep, int *pcip) diff --git a/sys/powerpc/mpc85xx/pci_mpc85xx.c b/sys/powerpc/mpc85xx/pci_mpc85xx.c index de55afc4dba2..4397ac0ad778 100644 --- a/sys/powerpc/mpc85xx/pci_mpc85xx.c +++ b/sys/powerpc/mpc85xx/pci_mpc85xx.c @@ -55,13 +55,15 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include -#include #include #include #include 
+#include + #include "ofw_bus_if.h" #include "pcib_if.h" diff --git a/sys/powerpc/powermac/cpcht.c b/sys/powerpc/powermac/cpcht.c index 737e872801f3..765d94624cd0 100644 --- a/sys/powerpc/powermac/cpcht.c +++ b/sys/powerpc/powermac/cpcht.c @@ -36,6 +36,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include @@ -50,7 +51,7 @@ __FBSDID("$FreeBSD$"); #include #include -#include +#include #include #include diff --git a/sys/powerpc/powermac/grackle.c b/sys/powerpc/powermac/grackle.c index f0928f3a8a74..95d59a1ca744 100644 --- a/sys/powerpc/powermac/grackle.c +++ b/sys/powerpc/powermac/grackle.c @@ -37,9 +37,9 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include -#include #include #include @@ -52,6 +52,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include diff --git a/sys/powerpc/powermac/uninorthpci.c b/sys/powerpc/powermac/uninorthpci.c index 5cb21c1bb13a..9da06ffe10de 100644 --- a/sys/powerpc/powermac/uninorthpci.c +++ b/sys/powerpc/powermac/uninorthpci.c @@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include -#include #include #include @@ -49,6 +49,7 @@ __FBSDID("$FreeBSD$"); #include +#include #include #include diff --git a/sys/powerpc/powermac/uninorthvar.h b/sys/powerpc/powermac/uninorthvar.h index efe169c18c3d..e08478d7580b 100644 --- a/sys/powerpc/powermac/uninorthvar.h +++ b/sys/powerpc/powermac/uninorthvar.h @@ -30,6 +30,7 @@ #include #include +#include struct uninorth_softc { struct ofw_pci_softc pci_sc; diff --git a/sys/powerpc/pseries/rtas_pci.c b/sys/powerpc/pseries/rtas_pci.c index 1348fc8992dc..bb72b710e7b5 100644 --- a/sys/powerpc/pseries/rtas_pci.c +++ b/sys/powerpc/pseries/rtas_pci.c @@ -34,9 +34,9 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include -#include #include #include @@ -53,6 +53,7 @@ __FBSDID("$FreeBSD$"); #include #include +#include #include #include "pcib_if.h" -- cgit v1.2.3 From c90369f880c99f2d3abf4c3754c3a92ea1009249 Mon Sep 17 00:00:00 
2001 From: Kristof Provost Date: Sat, 20 Feb 2016 12:53:53 +0000 Subject: in pf_print_state_parts, do not use skw->proto to print the protocol but our local copy proto that we very carefully set beforehands. skw being NULL is perfectly valid there. Obtained from: OpenBSD (henning) --- sys/netpfil/pf/pf.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/netpfil/pf/pf.c b/sys/netpfil/pf/pf.c index 1b7715309ba7..283dddc9920d 100644 --- a/sys/netpfil/pf/pf.c +++ b/sys/netpfil/pf/pf.c @@ -1848,7 +1848,7 @@ pf_print_state_parts(struct pf_state *s, printf("ICMPv6"); break; default: - printf("%u", skw->proto); + printf("%u", proto); break; } switch (dir) { -- cgit v1.2.3 From 94a4ee3be7e6acabfe967ae5b9168176e6a1de2b Mon Sep 17 00:00:00 2001 From: Konstantin Belousov Date: Sat, 20 Feb 2016 13:21:59 +0000 Subject: Switch /dev/hpet to use make_dev_s(9). Device needs si_drv1 initializated, do it correctly even though hpet cannot be loaded as module. Sponsored by: The FreeBSD Foundation MFC after: 1 week --- sys/dev/acpica/acpi_hpet.c | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/sys/dev/acpica/acpi_hpet.c b/sys/dev/acpica/acpi_hpet.c index 1b5a161eabba..de4436f2a209 100644 --- a/sys/dev/acpica/acpi_hpet.c +++ b/sys/dev/acpica/acpi_hpet.c @@ -422,8 +422,9 @@ hpet_attach(device_t dev) { struct hpet_softc *sc; struct hpet_timer *t; + struct make_dev_args mda; int i, j, num_msi, num_timers, num_percpu_et, num_percpu_t, cur_cpu; - int pcpu_master; + int pcpu_master, error; static int maxhpetet = 0; uint32_t val, val2, cvectors, dvectors; uint16_t vendor, rev; @@ -746,10 +747,14 @@ hpet_attach(device_t dev) } } - sc->pdev = make_dev(&hpet_cdevsw, 0, UID_ROOT, GID_WHEEL, - 0600, "hpet%d", device_get_unit(dev)); - if (sc->pdev) { - sc->pdev->si_drv1 = sc; + make_dev_args_init(&mda); + mda.mda_devsw = &hpet_cdevsw; + mda.mda_uid = UID_ROOT; + mda.mda_gid = GID_WHEEL; + mda.mda_mode = 0600; + mda.mda_si_drv1 = sc; + error 
= make_dev_s(&mda, &sc->pdev, "hpet%d", device_get_unit(dev)); + if (error == 0) { sc->mmap_allow = 1; TUNABLE_INT_FETCH("hw.acpi.hpet.mmap_allow", &sc->mmap_allow); @@ -766,9 +771,10 @@ hpet_attach(device_t dev) OID_AUTO, "mmap_allow_write", CTLFLAG_RW, &sc->mmap_allow_write, 0, "Allow userland write to the HPET register space"); - } else - device_printf(dev, "could not create /dev/hpet%d\n", - device_get_unit(dev)); + } else { + device_printf(dev, "could not create /dev/hpet%d, error %d\n", + device_get_unit(dev), error); + } return (0); } -- cgit v1.2.3 From 2fe1339ea26bb7ba59e844b5c1dee03250a7de16 Mon Sep 17 00:00:00 2001 From: Konstantin Belousov Date: Sat, 20 Feb 2016 13:37:04 +0000 Subject: Some BIOSes ACPI bytecode needs to take (sleepable) acpi mutex for acpi_GetInteger() execution. Intel DMAR interrupt remapping code needs to know UID of the HPET to properly route the FSB interrupts from the HPET, even when interrupt remapping is disabled, and the code is executed under some non-sleepable mutexes. Cache HPET UIDs in the device softc at the attach time and provide lock-less method to get UID, use the method from the dmar hpet handling code instead of calling GetInteger(). 
Reported and tested by: Larry Rosenman Sponsored by: The FreeBSD Foundation MFC after: 1 week --- sys/dev/acpica/acpi_hpet.c | 11 +++++++++++ sys/dev/acpica/acpivar.h | 2 ++ sys/x86/iommu/intel_drv.c | 8 ++------ 3 files changed, 15 insertions(+), 6 deletions(-) diff --git a/sys/dev/acpica/acpi_hpet.c b/sys/dev/acpica/acpi_hpet.c index de4436f2a209..76fbd5aba256 100644 --- a/sys/dev/acpica/acpi_hpet.c +++ b/sys/dev/acpica/acpi_hpet.c @@ -85,6 +85,7 @@ struct hpet_softc { struct resource *intr_res; void *intr_handle; ACPI_HANDLE handle; + uint32_t acpi_uid; uint64_t freq; uint32_t caps; struct timecounter tc; @@ -295,6 +296,15 @@ hpet_intr(void *arg) return (FILTER_STRAY); } +uint32_t +hpet_get_uid(device_t dev) +{ + struct hpet_softc *sc; + + sc = device_get_softc(dev); + return (sc->acpi_uid); +} + static ACPI_STATUS hpet_find(ACPI_HANDLE handle, UINT32 level, void *context, void **status) @@ -746,6 +756,7 @@ hpet_attach(device_t dev) maxhpetet++; } } + acpi_GetInteger(sc->handle, "_UID", &sc->acpi_uid); make_dev_args_init(&mda); mda.mda_devsw = &hpet_cdevsw; diff --git a/sys/dev/acpica/acpivar.h b/sys/dev/acpica/acpivar.h index 4f601c96fef0..4df83d5bcd23 100644 --- a/sys/dev/acpica/acpivar.h +++ b/sys/dev/acpica/acpivar.h @@ -441,6 +441,8 @@ int acpi_wakeup_machdep(struct acpi_softc *sc, int state, int acpi_table_quirks(int *quirks); int acpi_machdep_quirks(int *quirks); +uint32_t hpet_get_uid(device_t dev); + /* Battery Abstraction. 
*/ struct acpi_battinfo; diff --git a/sys/x86/iommu/intel_drv.c b/sys/x86/iommu/intel_drv.c index 47588af1cb62..e5d7783658cb 100644 --- a/sys/x86/iommu/intel_drv.c +++ b/sys/x86/iommu/intel_drv.c @@ -826,13 +826,9 @@ dmar_find_nonpci(u_int id, u_int entry_type, uint16_t *rid) struct dmar_unit * dmar_find_hpet(device_t dev, uint16_t *rid) { - ACPI_HANDLE handle; - uint32_t hpet_id; - handle = acpi_get_handle(dev); - if (ACPI_FAILURE(acpi_GetInteger(handle, "_UID", &hpet_id))) - return (NULL); - return (dmar_find_nonpci(hpet_id, ACPI_DMAR_SCOPE_TYPE_HPET, rid)); + return (dmar_find_nonpci(hpet_get_uid(dev), ACPI_DMAR_SCOPE_TYPE_HPET, + rid)); } struct dmar_unit * -- cgit v1.2.3 From f4cd39e3b2c1b96b2eed48c4f02a3955b09192b0 Mon Sep 17 00:00:00 2001 From: Andrew Turner Date: Sat, 20 Feb 2016 17:29:04 +0000 Subject: Add a missing call to dev_cleanup from the arm64 loader.efi. Sponsored by: ABT Systems Ltd --- sys/boot/efi/loader/arch/arm64/exec.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sys/boot/efi/loader/arch/arm64/exec.c b/sys/boot/efi/loader/arch/arm64/exec.c index a0f8b833eb7e..eb1830c548a4 100644 --- a/sys/boot/efi/loader/arch/arm64/exec.c +++ b/sys/boot/efi/loader/arch/arm64/exec.c @@ -117,6 +117,8 @@ elf64_exec(struct preloaded_file *fp) if (err != 0) return (err); + dev_cleanup(); + /* Clean D-cache under kernel area and invalidate whole I-cache */ clean_addr = (vm_offset_t)efi_translate(fp->f_addr); clean_size = (vm_offset_t)efi_translate(kernendp) - clean_addr; -- cgit v1.2.3 From 1e1bbb79fd726164b79f1c060f6ebb10ca7277a2 Mon Sep 17 00:00:00 2001 From: Dimitry Andric Date: Sun, 21 Feb 2016 13:03:58 +0000 Subject: Fix "invalid type '(null)'" usage messages in zfs(8) and zpool(8). Currently, zfs(8) and zpool(8) print "invalid type '(null)'" or similar messages, if you pass in invalid types, sources or column names for "zfs get", "zfs list" and "zpool get". 
This is because the commands use getsubopt(3), and in case of failure, they print 'value', which is NULL when sub options don't match. They should print 'suboptarg' instead, which is the documented way to get at the non-matching sub option value. Reviewed by: smh MFC after: 3 days Differential Revision: https://reviews.freebsd.org/D5365 --- cddl/contrib/opensolaris/cmd/zfs/zfs_main.c | 8 ++++---- cddl/contrib/opensolaris/cmd/zpool/zpool_main.c | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c b/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c index 9db2f73278d8..99f9a1e5441c 100644 --- a/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c +++ b/cddl/contrib/opensolaris/cmd/zfs/zfs_main.c @@ -1713,7 +1713,7 @@ zfs_do_get(int argc, char **argv) default: (void) fprintf(stderr, gettext("invalid column name " - "'%s'\n"), value); + "'%s'\n"), suboptarg); usage(B_FALSE); } } @@ -1750,7 +1750,7 @@ zfs_do_get(int argc, char **argv) default: (void) fprintf(stderr, gettext("invalid source " - "'%s'\n"), value); + "'%s'\n"), suboptarg); usage(B_FALSE); } } @@ -1786,7 +1786,7 @@ zfs_do_get(int argc, char **argv) default: (void) fprintf(stderr, gettext("invalid type '%s'\n"), - value); + suboptarg); usage(B_FALSE); } } @@ -3156,7 +3156,7 @@ zfs_do_list(int argc, char **argv) default: (void) fprintf(stderr, gettext("invalid type '%s'\n"), - value); + suboptarg); usage(B_FALSE); } } diff --git a/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c b/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c index 6e1670e8a392..c6662637c150 100644 --- a/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c +++ b/cddl/contrib/opensolaris/cmd/zpool/zpool_main.c @@ -5431,7 +5431,7 @@ zpool_do_get(int argc, char **argv) default: (void) fprintf(stderr, gettext("invalid column name " - "'%s'\n"), value); + "'%s'\n"), suboptarg); usage(B_FALSE); } } -- cgit v1.2.3 From 674677bd084eb8845dab81ad58c063f484546f81 Mon Sep 17 00:00:00 2001 From: Edward Tomasz 
Napierala Date: Sun, 21 Feb 2016 14:36:50 +0000 Subject: Make the "invalid numeric value" error message actually displayable (was a dead code before). Submitted by: bde@ (earlier version) Reviewed by: bde@ MFC after: 1 month Sponsored by: The FreeBSD Foundation --- bin/dd/args.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/bin/dd/args.c b/bin/dd/args.c index db8d445b5250..4607d673aa74 100644 --- a/bin/dd/args.c +++ b/bin/dd/args.c @@ -422,11 +422,10 @@ get_num(const char *val) errno = 0; num = strtoumax(val, &expr, 0); - if (errno != 0) /* Overflow or underflow. */ - err(1, "%s", oper); - if (expr == val) /* No valid digits. */ - errx(1, "%s: illegal numeric value", oper); + errx(1, "%s: invalid numeric value", oper); + if (errno != 0) + err(1, "%s", oper); mult = postfix_to_mult(*expr); @@ -472,11 +471,10 @@ get_off_t(const char *val) errno = 0; num = strtoimax(val, &expr, 0); - if (errno != 0) /* Overflow or underflow. */ - err(1, "%s", oper); - if (expr == val) /* No valid digits. */ - errx(1, "%s: illegal numeric value", oper); + errx(1, "%s: invalid numeric value", oper); + if (errno != 0) + err(1, "%s", oper); mult = postfix_to_mult(*expr); -- cgit v1.2.3 From f4d6a773f8ea77ed5a5b5321db6e760332f7a016 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Dag-Erling=20Sm=C3=B8rgrav?= Date: Sun, 21 Feb 2016 14:56:05 +0000 Subject: Implement /proc/$$/limits. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit PR: 207386 Submitted by: Szymon Åšliwa MFC after: 3 weeks --- sys/compat/linprocfs/linprocfs.c | 64 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/sys/compat/linprocfs/linprocfs.c b/sys/compat/linprocfs/linprocfs.c index 496afba8fcf6..86c4b050a959 100644 --- a/sys/compat/linprocfs/linprocfs.c +++ b/sys/compat/linprocfs/linprocfs.c @@ -61,6 +61,7 @@ __FBSDID("$FreeBSD$"); #include #include #include +#include #include #include #include @@ -1366,6 +1367,67 @@ linprocfs_dofdescfs(PFS_FILL_ARGS) return (0); } +/* + * Filler function for proc/pid/limits + */ + +#define RLIM_NONE -1 + +static const struct limit_info { + const char *desc; + const char *unit; + unsigned long long rlim_id; +} limits_info[] = { + { "Max cpu time", "seconds", RLIMIT_CPU }, + { "Max file size", "bytes", RLIMIT_FSIZE }, + { "Max data size", "bytes", RLIMIT_DATA }, + { "Max stack size", "bytes", RLIMIT_STACK }, + { "Max core file size", "bytes", RLIMIT_CORE }, + { "Max resident set", "bytes", RLIMIT_RSS }, + { "Max processes", "processes", RLIMIT_NPROC }, + { "Max open files", "files", RLIMIT_NOFILE }, + { "Max locked memory", "bytes", RLIMIT_MEMLOCK }, + { "Max address space", "bytes", RLIMIT_AS }, + { "Max file locks", "locks", RLIM_INFINITY }, + { "Max pending signals", "signals", RLIM_INFINITY }, + { "Max msgqueue size", "bytes", RLIM_NONE }, + { "Max nice priority", "", RLIM_NONE }, + { "Max realtime priority", "", RLIM_NONE }, + { "Max realtime timeout", "us", RLIM_INFINITY }, + { 0, 0, 0 } +}; + +static int +linprocfs_doproclimits(PFS_FILL_ARGS) +{ + const struct limit_info *li; + struct rlimit li_rlimits; + struct plimit *cur_proc_lim; + + cur_proc_lim = lim_alloc(); + lim_copy(cur_proc_lim, p->p_limit); + sbuf_printf(sb, "%-26s%-21s%-21s%-10s\n", "Limit", "Soft Limit", + "Hard Limit", "Units"); + for (li = limits_info; li->desc != NULL; ++li) { + if 
(li->rlim_id != RLIM_INFINITY && li->rlim_id != RLIM_NONE) + li_rlimits = cur_proc_lim->pl_rlimit[li->rlim_id]; + else { + li_rlimits.rlim_cur = 0; + li_rlimits.rlim_max = 0; + } + if (li->rlim_id == RLIM_INFINITY || + li_rlimits.rlim_cur == RLIM_INFINITY) + sbuf_printf(sb, "%-26s%-21s%-21s%-10s\n", + li->desc, "unlimited", "unlimited", li->unit); + else + sbuf_printf(sb, "%-26s%-21ld%-21ld%-10s\n", + li->desc, (long)li_rlimits.rlim_cur, + (long)li_rlimits.rlim_max, li->unit); + } + lim_free(cur_proc_lim); + return (0); +} + /* * Filler function for proc/sys/kernel/random/uuid @@ -1504,6 +1566,8 @@ linprocfs_init(PFS_INIT_ARGS) NULL, NULL, NULL, 0); pfs_create_file(dir, "auxv", &linprocfs_doauxv, NULL, &procfs_candebug, NULL, PFS_RD|PFS_RAWRD); + pfs_create_file(dir, "limits", &linprocfs_doproclimits, + NULL, NULL, NULL, PFS_RD); /* /proc/scsi/... */ dir = pfs_create_dir(root, "scsi", NULL, NULL, NULL, 0); -- cgit v1.2.3 From e1ba387e11b73f79c573be04e03035f547fcddbc Mon Sep 17 00:00:00 2001 From: Ian Lepore Date: Sun, 21 Feb 2016 14:59:24 +0000 Subject: Unconditionally set e_ident[OSABI]=ELFOSABI_FREEBSD in arm binary headers. When the armv6 support was imported from a project branch, this complex conditional logic and related #define'd values came along, but it's really not clear what the intent of it all was. The effect, however, was that OSABI was always set to zero, which is "UNIX System V ABI". Having the wrong value there causes pkg(8) to avoid looking inside arm elf binaries to determine shared-lib required/provides info for packaging. 
--- contrib/binutils/bfd/elf32-arm.c | 14 ++------------ 1 file changed, 2 insertions(+), 12 deletions(-) diff --git a/contrib/binutils/bfd/elf32-arm.c b/contrib/binutils/bfd/elf32-arm.c index e20ccca85162..066e17e5dae1 100644 --- a/contrib/binutils/bfd/elf32-arm.c +++ b/contrib/binutils/bfd/elf32-arm.c @@ -59,13 +59,6 @@ #define elf_info_to_howto 0 #define elf_info_to_howto_rel elf32_arm_info_to_howto -#define ARM_ELF_ABI_VERSION 0 -#ifdef __FreeBSD__ -#define ARM_ELF_OS_ABI_VERSION ELFOSABI_FREEBSD -#else -#define ARM_ELF_OS_ABI_VERSION ELFOSABI_ARM -#endif - static struct elf_backend_data elf32_arm_vxworks_bed; /* Note: code such as elf32_arm_reloc_type_lookup expect to use e.g. @@ -9377,11 +9370,8 @@ elf32_arm_post_process_headers (bfd * abfd, struct bfd_link_info * link_info ATT i_ehdrp = elf_elfheader (abfd); - if (EF_ARM_EABI_VERSION (i_ehdrp->e_flags) == EF_ARM_EABI_UNKNOWN) - i_ehdrp->e_ident[EI_OSABI] = ARM_ELF_OS_ABI_VERSION; - else - i_ehdrp->e_ident[EI_OSABI] = 0; - i_ehdrp->e_ident[EI_ABIVERSION] = ARM_ELF_ABI_VERSION; + i_ehdrp->e_ident[EI_OSABI] = ELFOSABI_FREEBSD; + i_ehdrp->e_ident[EI_ABIVERSION] = 0; if (link_info) { -- cgit v1.2.3 From 99c006f9ba0b87b721db8c14af56d1729739efc4 Mon Sep 17 00:00:00 2001 From: "Pedro F. Giffuni" Date: Sun, 21 Feb 2016 16:45:22 +0000 Subject: ostiInitiatorIOCompleted(): wrong sizeof() argument. 
Detected by: PVS Static Analysis CID: 1331601, 1331523 --- sys/dev/pms/freebsd/driver/ini/src/osapi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sys/dev/pms/freebsd/driver/ini/src/osapi.c b/sys/dev/pms/freebsd/driver/ini/src/osapi.c index e47bf7c79309..f46192341a1e 100644 --- a/sys/dev/pms/freebsd/driver/ini/src/osapi.c +++ b/sys/dev/pms/freebsd/driver/ini/src/osapi.c @@ -313,7 +313,7 @@ ostiInitiatorIOCompleted(tiRoot_t *ptiRoot, } sense_len = MIN( pSenseData->senseLen, pccb->senseLen - csio->sense_resid ); - bzero(&csio->sense_data, sizeof(&csio->sense_data)); + bzero(&csio->sense_data, sizeof(csio->sense_data)); AGTIAPI_PRINTK("ostiInitiatorIOCompleted: check condition copying\n"); memcpy( (void *)pccb->pSenseData, pSenseData->senseData, -- cgit v1.2.3 From 3b3c9ccf082864d7eacec9ccfb98603edbf93b5e Mon Sep 17 00:00:00 2001 From: Jilles Tjoelker Date: Sun, 21 Feb 2016 16:48:37 +0000 Subject: sh: Remove unnecessary flushouts while reading script. Output is flushed when a builtin is done or immediately after writing it (error messages, set -v output, prompts). --- bin/sh/input.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/bin/sh/input.c b/bin/sh/input.c index 5921e082da09..27b835a55b84 100644 --- a/bin/sh/input.c +++ b/bin/sh/input.c @@ -212,8 +212,6 @@ preadbuffer(void) } if (parsenleft == EOF_NLEFT || parsefile->buf == NULL) return PEOF; - flushout(&output); - flushout(&errout); again: if (parselleft <= 0) { -- cgit v1.2.3 From 37d65816e11e82946577c820cf9a85f8061ee9ab Mon Sep 17 00:00:00 2001 From: Ian Lepore Date: Sun, 21 Feb 2016 18:17:09 +0000 Subject: Minor style cleanups. 
Submitted by: bde --- sys/i386/i386/machdep.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sys/i386/i386/machdep.c b/sys/i386/i386/machdep.c index 15bc75c2d7b7..529112ddb3bf 100644 --- a/sys/i386/i386/machdep.c +++ b/sys/i386/i386/machdep.c @@ -2463,8 +2463,8 @@ init386(first) metadata_missing = 1; } - if (bootinfo.bi_envp) - init_static_kenv((caddr_t)bootinfo.bi_envp + KERNBASE, 0); + if (bootinfo.bi_envp != 0) + init_static_kenv((char *)bootinfo.bi_envp + KERNBASE, 0); else init_static_kenv(NULL, 0); -- cgit v1.2.3 From 85143dd18d753c89fb2f7057a3133006439f074f Mon Sep 17 00:00:00 2001 From: Ian Lepore Date: Sun, 21 Feb 2016 18:35:01 +0000 Subject: Allow a dynamic env to override a compiled-in static env by passing in the override indication in the env data. Submitted by: bde --- sys/kern/kern_environment.c | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/sys/kern/kern_environment.c b/sys/kern/kern_environment.c index cd6930b9cada..511ab697a525 100644 --- a/sys/kern/kern_environment.c +++ b/sys/kern/kern_environment.c @@ -217,6 +217,9 @@ done: * environment obtained from a boot loader, or to provide an empty buffer into * which MD code can store an initial environment using kern_setenv() calls. * + * When a copy of an initial environment is passed in, we start by scanning that + * env for overrides to the compiled-in envmode and hintmode variables. + * * If the global envmode is 1, the environment is initialized from the global * static_env[], regardless of the arguments passed. This implements the env * keyword described in config(5). 
In this case env_pos is set to env_len, @@ -238,6 +241,14 @@ done: void init_static_kenv(char *buf, size_t len) { + char *cp; + + for (cp = buf; cp != NULL && cp[0] != '\0'; cp += strlen(cp) + 1) { + if (strcmp(cp, "static_env.disabled=1") == 0) + envmode = 0; + if (strcmp(cp, "static_hints.disabled=1") == 0) + hintmode = 0; + } if (envmode == 1) { kern_envp = static_env; -- cgit v1.2.3 From 5036353a49355b8a984347eb5377693cff9aaa5f Mon Sep 17 00:00:00 2001 From: Andriy Voskoboinyk Date: Sun, 21 Feb 2016 18:51:48 +0000 Subject: rtwn: import r290048. - Fix scanning from AUTH state. Tested by: Simone Mario Lombardo PR: 203105 Reviewed by: kevlo Approved by: adrian (mentor) Differential Revision: https://reviews.freebsd.org/D4820 --- sys/dev/rtwn/if_rtwn.c | 69 +++++++++++++++++++++++++++++++------------------- 1 file changed, 43 insertions(+), 26 deletions(-) diff --git a/sys/dev/rtwn/if_rtwn.c b/sys/dev/rtwn/if_rtwn.c index 0d817fbc8c0c..8e8f1a363b0e 100644 --- a/sys/dev/rtwn/if_rtwn.c +++ b/sys/dev/rtwn/if_rtwn.c @@ -168,6 +168,8 @@ static void rtwn_get_txpower(struct rtwn_softc *, int, uint16_t[]); static void rtwn_set_txpower(struct rtwn_softc *, struct ieee80211_channel *, struct ieee80211_channel *); +static void rtwn_set_rx_bssid_all(struct rtwn_softc *, int); +static void rtwn_set_gain(struct rtwn_softc *, uint8_t); static void rtwn_scan_start(struct ieee80211com *); static void rtwn_scan_end(struct ieee80211com *); static void rtwn_set_channel(struct ieee80211com *); @@ -1237,22 +1239,6 @@ rtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) rtwn_set_led(sc, RTWN_LED_LINK, 0); break; case IEEE80211_S_SCAN: - if (vap->iv_state != IEEE80211_S_SCAN) { - /* Allow Rx from any BSSID. */ - rtwn_write_4(sc, R92C_RCR, - rtwn_read_4(sc, R92C_RCR) & - ~(R92C_RCR_CBSSID_DATA | R92C_RCR_CBSSID_BCN)); - - /* Set gain for scanning. 
*/ - reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); - reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20); - rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); - - reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); - reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x20); - rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); - } - /* Make link LED blink during scan. */ rtwn_set_led(sc, RTWN_LED_LINK, !sc->ledlink); @@ -1261,14 +1247,6 @@ rtwn_newstate(struct ieee80211vap *vap, enum ieee80211_state nstate, int arg) rtwn_read_1(sc, R92C_TXPAUSE) | 0x0f); break; case IEEE80211_S_AUTH: - /* Set initial gain under link. */ - reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); - reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32); - rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); - - reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); - reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, 0x32); - rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); rtwn_set_chan(sc, ic->ic_curchan, NULL); break; case IEEE80211_S_RUN: @@ -2683,18 +2661,57 @@ rtwn_set_txpower(struct rtwn_softc *sc, struct ieee80211_channel *c, } } +static void +rtwn_set_rx_bssid_all(struct rtwn_softc *sc, int enable) +{ + uint32_t reg; + + reg = rtwn_read_4(sc, R92C_RCR); + if (enable) + reg &= ~R92C_RCR_CBSSID_BCN; + else + reg |= R92C_RCR_CBSSID_BCN; + rtwn_write_4(sc, R92C_RCR, reg); +} + +static void +rtwn_set_gain(struct rtwn_softc *sc, uint8_t gain) +{ + uint32_t reg; + + reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(0)); + reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); + rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(0), reg); + + reg = rtwn_bb_read(sc, R92C_OFDM0_AGCCORE1(1)); + reg = RW(reg, R92C_OFDM0_AGCCORE1_GAIN, gain); + rtwn_bb_write(sc, R92C_OFDM0_AGCCORE1(1), reg); +} + static void rtwn_scan_start(struct ieee80211com *ic) { + struct rtwn_softc *sc = ic->ic_softc; - /* XXX do nothing? */ + RTWN_LOCK(sc); + /* Receive beacons / probe responses from any BSSID. */ + rtwn_set_rx_bssid_all(sc, 1); + /* Set gain for scanning. 
*/ + rtwn_set_gain(sc, 0x20); + RTWN_UNLOCK(sc); } static void rtwn_scan_end(struct ieee80211com *ic) { + struct rtwn_softc *sc = ic->ic_softc; - /* XXX do nothing? */ + RTWN_LOCK(sc); + /* Restore limitations. */ + rtwn_set_rx_bssid_all(sc, 0); + /* Set gain under link. */ + rtwn_set_gain(sc, 0x32); + RTWN_UNLOCK(sc); } static void -- cgit v1.2.3 From c39f3bac71af80db95b52a86bb4b16f9cb95deb6 Mon Sep 17 00:00:00 2001 From: Jilles Tjoelker Date: Sun, 21 Feb 2016 18:54:17 +0000 Subject: sh: Optimize setprompt(0). Avoid doing work to print an empty prompt (such as when reading scripts). --- bin/sh/parser.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/bin/sh/parser.c b/bin/sh/parser.c index 53d7923f8755..d324d88a8f18 100644 --- a/bin/sh/parser.c +++ b/bin/sh/parser.c @@ -1930,6 +1930,8 @@ static void setprompt(int which) { whichprompt = which; + if (which == 0) + return; #ifndef NO_HISTORY if (!el) -- cgit v1.2.3 From 8be85b352ba7bfd61155b6dec07e299aa9f6c00d Mon Sep 17 00:00:00 2001 From: Ian Lepore Date: Sun, 21 Feb 2016 18:58:05 +0000 Subject: Document the ability to override compiled-in env and hints using variables in the bootloader-provided env. --- usr.sbin/config/config.5 | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/usr.sbin/config/config.5 b/usr.sbin/config/config.5 index dfc9fc8f2c35..e0e92b851491 100644 --- a/usr.sbin/config/config.5 +++ b/usr.sbin/config/config.5 @@ -23,7 +23,7 @@ .\" .\" $FreeBSD$ .\" -.Dd December 3, 2005 +.Dd February 21, 2016 .Dt CONFIG 5 .Os .Sh NAME @@ -118,7 +118,8 @@ The kernel normally uses an environment prepared for it at boot time by .Xr loader 8 . This directive makes the kernel ignore the boot environment and use -the compiled-in environment instead. +the compiled-in environment instead, unless the boot environment contains +.Va static_env.disabled=1 . 
.Pp This directive is useful for setting kernel tunables in embedded environments that do not start from @@ -141,7 +142,9 @@ time (see .Xr device.hints 5 ) . This directive configures the kernel to use the static device configuration listed in -.Ar filename . +.Ar filename , +unless the boot environment contains +.Va static_hints.disabled=1 . The file .Ar filename must conform to the syntax specified by -- cgit v1.2.3 From 2ece3386512695d90cddaf183e9f0208652f893b Mon Sep 17 00:00:00 2001 From: Jilles Tjoelker Date: Sun, 21 Feb 2016 20:58:24 +0000 Subject: sh: Don't hash alias name when there are no aliases. --- bin/sh/alias.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/bin/sh/alias.c b/bin/sh/alias.c index a77ce99a9f07..a35513b0011b 100644 --- a/bin/sh/alias.c +++ b/bin/sh/alias.c @@ -144,9 +144,11 @@ rmaliases(void) struct alias * lookupalias(const char *name, int check) { - struct alias *ap = *hashalias(name); + struct alias *ap; - for (; ap; ap = ap->next) { + if (aliases == 0) + return (NULL); + for (ap = *hashalias(name); ap; ap = ap->next) { if (equal(name, ap->name)) { if (check && (ap->flag & ALIASINUSE)) return (NULL); -- cgit v1.2.3 From fe3232f39a69ffce51ff4b866661418b8924140f Mon Sep 17 00:00:00 2001 From: Andrew Turner Date: Sun, 21 Feb 2016 21:20:23 +0000 Subject: Make efi_time and EFI_GetTimeOfDay static, neither are used by other parts of the efi code. 
Sponsored by: ABT Systems Ltd --- sys/boot/efi/include/efilib.h | 1 - sys/boot/efi/libefi/time.c | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/sys/boot/efi/include/efilib.h b/sys/boot/efi/include/efilib.h index ba5b663da5f0..6717cb10f6be 100644 --- a/sys/boot/efi/include/efilib.h +++ b/sys/boot/efi/include/efilib.h @@ -46,7 +46,6 @@ int efi_handle_lookup(EFI_HANDLE, struct devsw **, int *, uint64_t *); int efi_handle_update_dev(EFI_HANDLE, struct devsw *, int, uint64_t); int efi_status_to_errno(EFI_STATUS); -time_t efi_time(EFI_TIME *); EFI_STATUS main(int argc, CHAR16 *argv[]); void exit(EFI_STATUS status); diff --git a/sys/boot/efi/libefi/time.c b/sys/boot/efi/libefi/time.c index 5c39415368d7..1f9d5daed1f6 100644 --- a/sys/boot/efi/libefi/time.c +++ b/sys/boot/efi/libefi/time.c @@ -58,7 +58,7 @@ __FBSDID("$FreeBSD$"); #define SECSPERHOUR ( 60*60 ) #define SECSPERDAY (24 * SECSPERHOUR) -time_t +static time_t efi_time(EFI_TIME *ETime) { /* @@ -164,7 +164,7 @@ efi_time(EFI_TIME *ETime) return UTime; } -int +static int EFI_GetTimeOfDay( OUT struct timeval *tp, OUT struct timezone *tzp -- cgit v1.2.3 From 7873b2abd6d9c5ad78f084c09e4159389dbb4339 Mon Sep 17 00:00:00 2001 From: Andriy Voskoboinyk Date: Mon, 22 Feb 2016 00:48:53 +0000 Subject: urtwn: add an option to compile the driver without firmware specific code - Add URTWN_WITHOUT_UCODE option (will disable any firmware specific code when set). - Do not exclude the driver from build when MK_SOURCELESS_UCODE is set (URTWN_WITHOUT_UCODE will be enforced unconditionally). - Do not abort initialization when firmware cannot be loaded; behave like the URTWN_WITHOUT_UCODE option was set. - Drop some unused variables from urtwn_softc structure. Tested with RTL8188EU and RTL8188CUS in HOSTAP and STA modes. 
Reviewed by: kevlo Approved by: adrian (mentor) Differential Revision: https://reviews.freebsd.org/D4849 --- sys/conf/options | 3 +++ sys/dev/usb/wlan/if_urtwn.c | 23 ++++++++++++++++++++--- sys/dev/usb/wlan/if_urtwnvar.h | 8 ++------ sys/modules/usb/Makefile | 3 +-- sys/modules/usb/urtwn/Makefile | 9 ++++++++- 5 files changed, 34 insertions(+), 12 deletions(-) diff --git a/sys/conf/options b/sys/conf/options index f1b2af473ceb..f7c48c2b61b6 100644 --- a/sys/conf/options +++ b/sys/conf/options @@ -673,6 +673,9 @@ UPLCOM_INTR_INTERVAL opt_uplcom.h UVSCOM_DEFAULT_OPKTSIZE opt_uvscom.h UVSCOM_INTR_INTERVAL opt_uvscom.h +# options for the Realtek RTL8188*U/RTL8192CU driver (urtwn) +URTWN_WITHOUT_UCODE opt_urtwn.h + # Embedded system options INIT_PATH diff --git a/sys/dev/usb/wlan/if_urtwn.c b/sys/dev/usb/wlan/if_urtwn.c index a6a8f5057f99..fa93210470eb 100644 --- a/sys/dev/usb/wlan/if_urtwn.c +++ b/sys/dev/usb/wlan/if_urtwn.c @@ -26,6 +26,7 @@ __FBSDID("$FreeBSD$"); */ #include "opt_wlan.h" +#include "opt_urtwn.h" #include #include @@ -308,11 +309,13 @@ static void urtwn_parent(struct ieee80211com *); static int urtwn_r92c_power_on(struct urtwn_softc *); static int urtwn_r88e_power_on(struct urtwn_softc *); static int urtwn_llt_init(struct urtwn_softc *); +#ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *); static void urtwn_r88e_fw_reset(struct urtwn_softc *); static int urtwn_fw_loadpage(struct urtwn_softc *, int, const uint8_t *, int); static int urtwn_load_firmware(struct urtwn_softc *); +#endif static int urtwn_dma_init(struct urtwn_softc *); static int urtwn_mac_init(struct urtwn_softc *); static void urtwn_bb_init(struct urtwn_softc *); @@ -1376,6 +1379,13 @@ urtwn_fw_cmd(struct urtwn_softc *sc, uint8_t id, const void *buf, int len) usb_error_t error; int ntries; + if (!(sc->sc_flags & URTWN_FW_LOADED)) { + URTWN_DPRINTF(sc, URTWN_DEBUG_FIRMWARE, "%s: firmware " + "was not loaded; command (id %d) will be discarded\n", + __func__, id); + 
return (0); + } + /* Wait for current FW box to be empty. */ for (ntries = 0; ntries < 100; ntries++) { if (!(urtwn_read_1(sc, R92C_HMETFR) & (1 << sc->fwcur))) @@ -3275,6 +3285,7 @@ urtwn_llt_init(struct urtwn_softc *sc) return (error); } +#ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *sc) { @@ -3457,6 +3468,7 @@ fail: firmware_put(fw, FIRMWARE_UNLOAD); return (error); } +#endif static int urtwn_dma_init(struct urtwn_softc *sc) @@ -4786,10 +4798,12 @@ urtwn_init(struct urtwn_softc *sc) urtwn_write_1(sc, R92C_BCN_MAX_ERR, 0xff); } +#ifndef URTWN_WITHOUT_UCODE /* Load 8051 microcode. */ error = urtwn_load_firmware(sc); - if (error != 0) - goto fail; + if (error == 0) + sc->sc_flags |= URTWN_FW_LOADED; +#endif /* Initialize MAC/BB/RF blocks. */ error = urtwn_mac_init(sc); @@ -4892,7 +4906,8 @@ urtwn_stop(struct urtwn_softc *sc) return; } - sc->sc_flags &= ~(URTWN_RUNNING | URTWN_TEMP_MEASURED); + sc->sc_flags &= ~(URTWN_RUNNING | URTWN_FW_LOADED | + URTWN_TEMP_MEASURED); sc->thcal_lctemp = 0; callout_stop(&sc->sc_watchdog_ch); urtwn_abort_xfers(sc); @@ -4991,6 +5006,8 @@ static devclass_t urtwn_devclass; DRIVER_MODULE(urtwn, uhub, urtwn_driver, urtwn_devclass, NULL, NULL); MODULE_DEPEND(urtwn, usb, 1, 1, 1); MODULE_DEPEND(urtwn, wlan, 1, 1, 1); +#ifndef URTWN_WITHOUT_UCODE MODULE_DEPEND(urtwn, firmware, 1, 1, 1); +#endif MODULE_VERSION(urtwn, 1); USB_PNP_HOST_INFO(urtwn_devs); diff --git a/sys/dev/usb/wlan/if_urtwnvar.h b/sys/dev/usb/wlan/if_urtwnvar.h index ac94f96003e4..ce388d22189e 100644 --- a/sys/dev/usb/wlan/if_urtwnvar.h +++ b/sys/dev/usb/wlan/if_urtwnvar.h @@ -155,7 +155,8 @@ struct urtwn_softc { uint8_t sc_flags; #define URTWN_FLAG_CCK_HIPWR 0x01 #define URTWN_DETACHED 0x02 -#define URTWN_RUNNING 0x04 +#define URTWN_RUNNING 0x04 +#define URTWN_FW_LOADED 0x08 #define URTWN_TEMP_MEASURED 0x10 u_int chip; @@ -196,11 +197,6 @@ struct urtwn_softc { urtwn_datahead sc_tx_inactive; urtwn_datahead sc_tx_pending; - const char *fwname; - 
const struct firmware *fw_fp; - struct urtwn_fw_info fw; - void *fw_virtaddr; - union urtwn_rom rom; uint16_t last_rom_addr; diff --git a/sys/modules/usb/Makefile b/sys/modules/usb/Makefile index 8bb355ba2beb..b236d04b139c 100644 --- a/sys/modules/usb/Makefile +++ b/sys/modules/usb/Makefile @@ -47,7 +47,7 @@ SUBDIR = usb SUBDIR += ${_dwc_otg} ehci ${_musb} ohci uhci xhci ${_uss820dci} ${_at91dci} \ ${_atmegadci} ${_avr32dci} ${_rsu} ${_rsufw} ${_saf1761otg} SUBDIR += ${_rum} ${_run} ${_runfw} ${_uath} upgt usie ural ${_zyd} ${_urtw} -SUBDIR += ${_urtwn} ${_urtwnfw} +SUBDIR += urtwn ${_urtwnfw} SUBDIR += atp uhid ukbd ums udbp ufm uep wsp ugold uled SUBDIR += ucom u3g uark ubsa ubser uchcom ucycom ufoma uftdi ugensa uipaq ulpt \ umct umcs umodem umoscom uplcom uslcom uvisor uvscom @@ -70,7 +70,6 @@ _rum= rum _uath= uath _zyd= zyd _kue= kue -_urtwn= urtwn _urtwnfw= urtwnfw _run= run _runfw= runfw diff --git a/sys/modules/usb/urtwn/Makefile b/sys/modules/usb/urtwn/Makefile index 656fab93a330..59fa910aa82c 100644 --- a/sys/modules/usb/urtwn/Makefile +++ b/sys/modules/usb/urtwn/Makefile @@ -2,9 +2,16 @@ .PATH: ${.CURDIR}/../../../dev/usb/wlan +.include + KMOD = if_urtwn SRCS = if_urtwn.c if_urtwnreg.h if_urtwnvar.h \ bus_if.h device_if.h \ - opt_bus.h opt_usb.h opt_wlan.h usb_if.h usbdevs.h + opt_bus.h opt_urtwn.h opt_usb.h opt_wlan.h usb_if.h usbdevs.h + +.if ${MK_SOURCELESS_UCODE} == "no" +opt_urtwn.h: + @echo "#define URTWN_WITHOUT_UCODE 1" > ${.TARGET} +.endif .include -- cgit v1.2.3 From 15b185341d71185dedca7884be9f02c83fd444f3 Mon Sep 17 00:00:00 2001 From: Pyun YongHyeon Date: Mon, 22 Feb 2016 00:58:04 +0000 Subject: ifnet lock was changed to use sx(9) long time ago. Don't hold a driver lock for if_free(9). 
--- sys/dev/msk/if_msk.c | 4 ++-- sys/dev/sk/if_sk.c | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/sys/dev/msk/if_msk.c b/sys/dev/msk/if_msk.c index cc6d8e45ac2e..ffbe36eb423a 100644 --- a/sys/dev/msk/if_msk.c +++ b/sys/dev/msk/if_msk.c @@ -2059,11 +2059,11 @@ msk_detach(device_t dev) msk_txrx_dma_free(sc_if); bus_generic_detach(dev); - if (ifp) - if_free(ifp); sc = sc_if->msk_softc; sc->msk_if[sc_if->msk_port] = NULL; MSK_IF_UNLOCK(sc_if); + if (ifp) + if_free(ifp); return (0); } diff --git a/sys/dev/sk/if_sk.c b/sys/dev/sk/if_sk.c index 2678390eb8e0..bdf4365c2f17 100644 --- a/sys/dev/sk/if_sk.c +++ b/sys/dev/sk/if_sk.c @@ -1833,8 +1833,6 @@ sk_detach(dev) ether_ifdetach(ifp); SK_IF_LOCK(sc_if); } - if (ifp) - if_free(ifp); /* * We're generally called from skc_detach() which is using * device_delete_child() to get to here. It's already trashed @@ -1848,6 +1846,8 @@ sk_detach(dev) sk_dma_jumbo_free(sc_if); sk_dma_free(sc_if); SK_IF_UNLOCK(sc_if); + if (ifp) + if_free(ifp); return(0); } -- cgit v1.2.3 From 6e2ab416930367e94a35ead805d688b0f0093ea9 Mon Sep 17 00:00:00 2001 From: Andriy Voskoboinyk Date: Mon, 22 Feb 2016 01:15:02 +0000 Subject: urtwn: shutdown the device properly - R92C path: NetBSD (mostly) - R88E path: TP-Link driver Tested with RTL8188EU and RTL8188CUS. 
Reviewed by: kevlo Approved by: adrian (mentor) Differential Revision: https://reviews.freebsd.org/D5198 --- sys/dev/usb/wlan/if_urtwn.c | 272 ++++++++++++++++++++++++++++++++++++++++- sys/dev/usb/wlan/if_urtwnreg.h | 25 ++++ sys/dev/usb/wlan/if_urtwnvar.h | 1 + 3 files changed, 296 insertions(+), 2 deletions(-) diff --git a/sys/dev/usb/wlan/if_urtwn.c b/sys/dev/usb/wlan/if_urtwn.c index fa93210470eb..a793ba93c2d3 100644 --- a/sys/dev/usb/wlan/if_urtwn.c +++ b/sys/dev/usb/wlan/if_urtwn.c @@ -308,6 +308,8 @@ static void urtwn_start(struct urtwn_softc *); static void urtwn_parent(struct ieee80211com *); static int urtwn_r92c_power_on(struct urtwn_softc *); static int urtwn_r88e_power_on(struct urtwn_softc *); +static void urtwn_r92c_power_off(struct urtwn_softc *); +static void urtwn_r88e_power_off(struct urtwn_softc *); static int urtwn_llt_init(struct urtwn_softc *); #ifndef URTWN_WITHOUT_UCODE static void urtwn_fw_reset(struct urtwn_softc *); @@ -1782,6 +1784,7 @@ urtwn_read_rom(struct urtwn_softc *sc) sc->sc_rf_write = urtwn_r92c_rf_write; sc->sc_power_on = urtwn_r92c_power_on; + sc->sc_power_off = urtwn_r92c_power_off; return (0); } @@ -1809,6 +1812,7 @@ urtwn_r88e_read_rom(struct urtwn_softc *sc) sc->sc_rf_write = urtwn_r88e_rf_write; sc->sc_power_on = urtwn_r88e_power_on; + sc->sc_power_off = urtwn_r88e_power_off; return (0); } @@ -3235,7 +3239,7 @@ urtwn_r88e_power_on(struct urtwn_softc *sc) /* Enable LDO normal mode. */ error = urtwn_write_1(sc, R92C_LPLDO_CTRL, - urtwn_read_1(sc, R92C_LPLDO_CTRL) & ~0x10); + urtwn_read_1(sc, R92C_LPLDO_CTRL) & ~R92C_LPLDO_CTRL_SLEEP); if (error != USB_ERR_NORMAL_COMPLETION) return (EIO); @@ -3254,6 +3258,269 @@ urtwn_r88e_power_on(struct urtwn_softc *sc) return (0); } +static __inline void +urtwn_power_off(struct urtwn_softc *sc) +{ + + return sc->sc_power_off(sc); +} + +static void +urtwn_r92c_power_off(struct urtwn_softc *sc) +{ + uint32_t reg; + + /* Block all Tx queues. 
*/ + urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); + + /* Disable RF */ + urtwn_rf_write(sc, 0, 0, 0); + + urtwn_write_1(sc, R92C_APSD_CTRL, R92C_APSD_CTRL_OFF); + + /* Reset BB state machine */ + urtwn_write_1(sc, R92C_SYS_FUNC_EN, + R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA | + R92C_SYS_FUNC_EN_BB_GLB_RST); + urtwn_write_1(sc, R92C_SYS_FUNC_EN, + R92C_SYS_FUNC_EN_USBD | R92C_SYS_FUNC_EN_USBA); + + /* + * Reset digital sequence + */ +#ifndef URTWN_WITHOUT_UCODE + if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) { + /* Reset MCU ready status */ + urtwn_write_1(sc, R92C_MCUFWDL, 0); + + /* If firmware in ram code, do reset */ + urtwn_fw_reset(sc); + } +#endif + + /* Reset MAC and Enable 8051 */ + urtwn_write_1(sc, R92C_SYS_FUNC_EN + 1, + (R92C_SYS_FUNC_EN_CPUEN | + R92C_SYS_FUNC_EN_ELDR | + R92C_SYS_FUNC_EN_HWPDN) >> 8); + + /* Reset MCU ready status */ + urtwn_write_1(sc, R92C_MCUFWDL, 0); + + /* Disable MAC clock */ + urtwn_write_2(sc, R92C_SYS_CLKR, + R92C_SYS_CLKR_ANAD16V_EN | + R92C_SYS_CLKR_ANA8M | + R92C_SYS_CLKR_LOADER_EN | + R92C_SYS_CLKR_80M_SSC_DIS | + R92C_SYS_CLKR_SYS_EN | + R92C_SYS_CLKR_RING_EN | + 0x4000); + + /* Disable AFE PLL */ + urtwn_write_1(sc, R92C_AFE_PLL_CTRL, 0x80); + + /* Gated AFE DIG_CLOCK */ + urtwn_write_2(sc, R92C_AFE_XTAL_CTRL, 0x880F); + + /* Isolated digital to PON */ + urtwn_write_1(sc, R92C_SYS_ISO_CTRL, + R92C_SYS_ISO_CTRL_MD2PP | + R92C_SYS_ISO_CTRL_PA2PCIE | + R92C_SYS_ISO_CTRL_PD2CORE | + R92C_SYS_ISO_CTRL_IP2MAC | + R92C_SYS_ISO_CTRL_DIOP | + R92C_SYS_ISO_CTRL_DIOE); + + /* + * Pull GPIO PIN to balance level and LED control + */ + /* 1. 
Disable GPIO[7:0] */ + urtwn_write_2(sc, R92C_GPIO_IOSEL, 0x0000); + + reg = urtwn_read_4(sc, R92C_GPIO_PIN_CTRL) & ~0x0000ff00; + reg |= ((reg << 8) & 0x0000ff00) | 0x00ff0000; + urtwn_write_4(sc, R92C_GPIO_PIN_CTRL, reg); + + /* Disable GPIO[10:8] */ + urtwn_write_1(sc, R92C_MAC_PINMUX_CFG, 0x00); + + reg = urtwn_read_2(sc, R92C_GPIO_IO_SEL) & ~0x00f0; + reg |= (((reg & 0x000f) << 4) | 0x0780); + urtwn_write_2(sc, R92C_GPIO_IO_SEL, reg); + + /* Disable LED0 & 1 */ + urtwn_write_2(sc, R92C_LEDCFG0, 0x8080); + + /* + * Reset digital sequence + */ + /* Disable ELDR clock */ + urtwn_write_2(sc, R92C_SYS_CLKR, + R92C_SYS_CLKR_ANAD16V_EN | + R92C_SYS_CLKR_ANA8M | + R92C_SYS_CLKR_LOADER_EN | + R92C_SYS_CLKR_80M_SSC_DIS | + R92C_SYS_CLKR_SYS_EN | + R92C_SYS_CLKR_RING_EN | + 0x4000); + + /* Isolated ELDR to PON */ + urtwn_write_1(sc, R92C_SYS_ISO_CTRL + 1, + (R92C_SYS_ISO_CTRL_DIOR | + R92C_SYS_ISO_CTRL_PWC_EV12V) >> 8); + + /* + * Disable analog sequence + */ + /* Disable A15 power */ + urtwn_write_1(sc, R92C_LDOA15_CTRL, R92C_LDOA15_CTRL_OBUF); + /* Disable digital core power */ + urtwn_write_1(sc, R92C_LDOV12D_CTRL, + urtwn_read_1(sc, R92C_LDOV12D_CTRL) & + ~R92C_LDOV12D_CTRL_LDV12_EN); + + /* Enter PFM mode */ + urtwn_write_1(sc, R92C_SPS0_CTRL, 0x23); + + /* Set USB suspend */ + urtwn_write_2(sc, R92C_APS_FSMCO, + R92C_APS_FSMCO_APDM_HOST | + R92C_APS_FSMCO_AFSM_HSUS | + R92C_APS_FSMCO_PFM_ALDN); + + /* Lock ISO/CLK/Power control register. */ + urtwn_write_1(sc, R92C_RSV_CTRL, 0x0E); +} + +static void +urtwn_r88e_power_off(struct urtwn_softc *sc) +{ + uint8_t reg; + int ntries; + + /* Disable any kind of TX reports. */ + urtwn_write_1(sc, R88E_TX_RPT_CTRL, + urtwn_read_1(sc, R88E_TX_RPT_CTRL) & + ~(R88E_TX_RPT1_ENA | R88E_TX_RPT2_ENA)); + + /* Stop Rx. */ + urtwn_write_1(sc, R92C_CR, 0); + + /* Move card to Low Power State. */ + /* Block all Tx queues. 
*/ + urtwn_write_1(sc, R92C_TXPAUSE, R92C_TX_QUEUE_ALL); + + for (ntries = 0; ntries < 20; ntries++) { + /* Should be zero if no packet is transmitting. */ + if (urtwn_read_4(sc, R88E_SCH_TXCMD) == 0) + break; + + urtwn_ms_delay(sc); + } + if (ntries == 20) { + device_printf(sc->sc_dev, "%s: failed to block Tx queues\n", + __func__); + return; + } + + /* CCK and OFDM are disabled, and clock are gated. */ + urtwn_write_1(sc, R92C_SYS_FUNC_EN, + urtwn_read_1(sc, R92C_SYS_FUNC_EN) & ~R92C_SYS_FUNC_EN_BBRSTB); + + urtwn_ms_delay(sc); + + /* Reset MAC TRX */ + urtwn_write_1(sc, R92C_CR, + R92C_CR_HCI_TXDMA_EN | R92C_CR_HCI_RXDMA_EN | + R92C_CR_TXDMA_EN | R92C_CR_RXDMA_EN | + R92C_CR_PROTOCOL_EN | R92C_CR_SCHEDULE_EN); + + /* check if removed later */ + urtwn_write_1(sc, R92C_CR + 1, + urtwn_read_1(sc, R92C_CR + 1) & ~(R92C_CR_ENSEC >> 8)); + + /* Respond TxOK to scheduler */ + urtwn_write_1(sc, R92C_DUAL_TSF_RST, + urtwn_read_1(sc, R92C_DUAL_TSF_RST) | 0x20); + + /* If firmware in ram code, do reset. */ +#ifndef URTWN_WITHOUT_UCODE + if (urtwn_read_1(sc, R92C_MCUFWDL) & R92C_MCUFWDL_RDY) + urtwn_r88e_fw_reset(sc); +#endif + + /* Reset MCU ready status. */ + urtwn_write_1(sc, R92C_MCUFWDL, 0x00); + + /* Disable 32k. */ + urtwn_write_1(sc, R88E_32K_CTRL, + urtwn_read_1(sc, R88E_32K_CTRL) & ~0x01); + + /* Move card to Disabled state. */ + /* Turn off RF. */ + urtwn_write_1(sc, R92C_RF_CTRL, 0); + + /* LDO Sleep mode. */ + urtwn_write_1(sc, R92C_LPLDO_CTRL, + urtwn_read_1(sc, R92C_LPLDO_CTRL) | R92C_LPLDO_CTRL_SLEEP); + + /* Turn off MAC by HW state machine */ + urtwn_write_1(sc, R92C_APS_FSMCO + 1, + urtwn_read_1(sc, R92C_APS_FSMCO + 1) | + (R92C_APS_FSMCO_APFM_OFF >> 8)); + + for (ntries = 0; ntries < 20; ntries++) { + /* Wait until it will be disabled. 
*/ + if ((urtwn_read_1(sc, R92C_APS_FSMCO + 1) & + (R92C_APS_FSMCO_APFM_OFF >> 8)) == 0) + break; + + urtwn_ms_delay(sc); + } + if (ntries == 20) { + device_printf(sc->sc_dev, "%s: could not turn off MAC\n", + __func__); + return; + } + + /* schmit trigger */ + urtwn_write_1(sc, R92C_AFE_XTAL_CTRL + 2, + urtwn_read_1(sc, R92C_AFE_XTAL_CTRL + 2) | 0x80); + + /* Enable WL suspend. */ + urtwn_write_1(sc, R92C_APS_FSMCO + 1, + (urtwn_read_1(sc, R92C_APS_FSMCO + 1) & ~0x10) | 0x08); + + /* Enable bandgap mbias in suspend. */ + urtwn_write_1(sc, R92C_APS_FSMCO + 3, 0); + + /* Clear SIC_EN register. */ + urtwn_write_1(sc, R92C_GPIO_MUXCFG + 1, + urtwn_read_1(sc, R92C_GPIO_MUXCFG + 1) & ~0x10); + + /* Set USB suspend enable local register */ + urtwn_write_1(sc, R92C_USB_SUSPEND, + urtwn_read_1(sc, R92C_USB_SUSPEND) | 0x10); + + /* Reset MCU IO Wrapper. */ + reg = urtwn_read_1(sc, R92C_RSV_CTRL + 1); + urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg & ~0x08); + urtwn_write_1(sc, R92C_RSV_CTRL + 1, reg | 0x08); + + /* marked as 'For Power Consumption' code. */ + urtwn_write_1(sc, R92C_GPIO_OUT, urtwn_read_1(sc, R92C_GPIO_IN)); + urtwn_write_1(sc, R92C_GPIO_IOSEL, 0xff); + + urtwn_write_1(sc, R92C_GPIO_IO_SEL, + urtwn_read_1(sc, R92C_GPIO_IO_SEL) << 4); + urtwn_write_1(sc, R92C_GPIO_MOD, + urtwn_read_1(sc, R92C_GPIO_MOD) | 0x0f); + + /* Set LNA, TRSW, EX_PA Pin to output mode. 
*/ + urtwn_write_4(sc, R88E_BB_PAD_CTRL, 0x00080808); +} + static int urtwn_llt_init(struct urtwn_softc *sc) { @@ -4910,9 +5177,10 @@ urtwn_stop(struct urtwn_softc *sc) URTWN_TEMP_MEASURED); sc->thcal_lctemp = 0; callout_stop(&sc->sc_watchdog_ch); - urtwn_abort_xfers(sc); + urtwn_abort_xfers(sc); urtwn_drain_mbufq(sc); + urtwn_power_off(sc); URTWN_UNLOCK(sc); } diff --git a/sys/dev/usb/wlan/if_urtwnreg.h b/sys/dev/usb/wlan/if_urtwnreg.h index 5b8b4f5de200..72835f36c026 100644 --- a/sys/dev/usb/wlan/if_urtwnreg.h +++ b/sys/dev/usb/wlan/if_urtwnreg.h @@ -70,6 +70,10 @@ #define R92C_GPIO_IO_SEL 0x042 #define R92C_MAC_PINMUX_CFG 0x043 #define R92C_GPIO_PIN_CTRL 0x044 +#define R92C_GPIO_IN 0x044 +#define R92C_GPIO_OUT 0x045 +#define R92C_GPIO_IOSEL 0x046 +#define R92C_GPIO_MOD 0x047 #define R92C_GPIO_INTM 0x048 #define R92C_LEDCFG0 0x04c #define R92C_LEDCFG1 0x04d @@ -79,6 +83,7 @@ #define R92C_FSISR 0x054 #define R92C_HSIMR 0x058 #define R92C_HSISR 0x05c +#define R88E_BB_PAD_CTRL 0x064 #define R92C_MCUFWDL 0x080 #define R92C_HMEBOX_EXT(idx) (0x088 + (idx) * 2) #define R88E_HIMR 0x0b0 @@ -117,6 +122,7 @@ #define R92C_MBIST_START 0x174 #define R92C_MBIST_DONE 0x178 #define R92C_MBIST_FAIL 0x17c +#define R88E_32K_CTRL 0x194 #define R92C_C2HEVT_MSG_NORMAL 0x1a0 #define R92C_C2HEVT_MSG_TEST 0x1b8 #define R92C_C2HEVT_CLEAR 0x1bf @@ -204,6 +210,7 @@ #define R92C_BE_ADMTIME 0x5c8 #define R92C_EDCA_RANDOM_GEN 0x5cc #define R92C_SCH_TXCMD 0x5d0 +#define R88E_SCH_TXCMD 0x5f8 /* WMAC Configuration. */ #define R92C_APSD_CTRL 0x600 #define R92C_BWOPMODE 0x603 @@ -303,13 +310,30 @@ #define R92C_RF_CTRL_RSTB 0x02 #define R92C_RF_CTRL_SDMRSTB 0x04 +/* Bits for R92C_LDOA15_CTRL. */ +#define R92C_LDOA15_CTRL_EN 0x01 +#define R92C_LDOA15_CTRL_STBY 0x02 +#define R92C_LDOA15_CTRL_OBUF 0x04 +#define R92C_LDOA15_CTRL_REG_VOS 0x08 + /* Bits for R92C_LDOV12D_CTRL. */ #define R92C_LDOV12D_CTRL_LDV12_EN 0x01 +/* Bits for R92C_LPLDO_CTRL. 
*/ +#define R92C_LPLDO_CTRL_SLEEP 0x10 + /* Bits for R92C_AFE_XTAL_CTRL. */ #define R92C_AFE_XTAL_CTRL_ADDR_M 0x007ff800 #define R92C_AFE_XTAL_CTRL_ADDR_S 11 +/* Bits for R92C_AFE_PLL_CTRL. */ +#define R92C_AFE_PLL_CTRL_EN 0x0001 +#define R92C_AFE_PLL_CTRL_320_EN 0x0002 +#define R92C_AFE_PLL_CTRL_FREF_SEL 0x0004 +#define R92C_AFE_PLL_CTRL_EDGE_SEL 0x0008 +#define R92C_AFE_PLL_CTRL_WDOGB 0x0010 +#define R92C_AFE_PLL_CTRL_LPFEN 0x0020 + /* Bits for R92C_EFUSE_CTRL. */ #define R92C_EFUSE_CTRL_DATA_M 0x000000ff #define R92C_EFUSE_CTRL_DATA_S 0 @@ -748,6 +772,7 @@ /* * USB registers. */ +#define R92C_USB_SUSPEND 0xfe10 #define R92C_USB_INFO 0xfe17 #define R92C_USB_SPECIAL_OPTION 0xfe55 #define R92C_USB_HCPWM 0xfe57 diff --git a/sys/dev/usb/wlan/if_urtwnvar.h b/sys/dev/usb/wlan/if_urtwnvar.h index ce388d22189e..ba8ca8130344 100644 --- a/sys/dev/usb/wlan/if_urtwnvar.h +++ b/sys/dev/usb/wlan/if_urtwnvar.h @@ -172,6 +172,7 @@ struct urtwn_softc { void (*sc_rf_write)(struct urtwn_softc *, int, uint8_t, uint32_t); int (*sc_power_on)(struct urtwn_softc *); + void (*sc_power_off)(struct urtwn_softc *); struct ieee80211_node *node_list[R88E_MACID_MAX + 1]; struct mtx nt_mtx; -- cgit v1.2.3 From 009a73c4ba32b974e2a804ee7a99a11f195da398 Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Mon, 22 Feb 2016 06:17:26 +0000 Subject: hyperv/hn: Rename TX related function and struct fields a bit Preamble to implement the ifnet.if_transmit method. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5344 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 2 +- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 16 ++++++++-------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index 693c584ea5f6..042f21f16c71 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -1030,7 +1030,7 @@ struct hn_tx_ring { int hn_sched_tx; struct taskqueue *hn_tx_taskq; - struct task hn_start_task; + struct task hn_tx_task; struct task hn_txeof_task; struct mtx hn_tx_lock; diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 14e44c2091b1..d91ada5f92e9 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -298,8 +298,8 @@ static int hn_create_tx_ring(struct hn_softc *, int); static void hn_destroy_tx_ring(struct hn_tx_ring *); static int hn_create_tx_data(struct hn_softc *); static void hn_destroy_tx_data(struct hn_softc *); -static void hn_start_taskfunc(void *xsc, int pending); -static void hn_txeof_taskfunc(void *xsc, int pending); +static void hn_start_taskfunc(void *, int); +static void hn_start_txeof_taskfunc(void *, int); static void hn_stop_tx_tasks(struct hn_softc *); static int hn_encap(struct hn_tx_ring *, struct hn_txdesc *, struct mbuf **); static void hn_create_rx_data(struct hn_softc *sc); @@ -1555,7 +1555,7 @@ hn_start(struct ifnet *ifp) return; } do_sched: - taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_start_task); + taskqueue_enqueue(txr->hn_tx_taskq, &txr->hn_tx_task); } static void @@ -1577,7 +1577,7 @@ hn_start_txeof(struct hn_tx_ring *txr) mtx_unlock(&txr->hn_tx_lock); if (sched) { taskqueue_enqueue(txr->hn_tx_taskq, - &txr->hn_start_task); + &txr->hn_tx_task); } } else { do_sched: @@ 
-2103,8 +2103,8 @@ hn_create_tx_ring(struct hn_softc *sc, int id) #endif txr->hn_tx_taskq = sc->hn_tx_taskq; - TASK_INIT(&txr->hn_start_task, 0, hn_start_taskfunc, txr); - TASK_INIT(&txr->hn_txeof_task, 0, hn_txeof_taskfunc, txr); + TASK_INIT(&txr->hn_tx_task, 0, hn_start_taskfunc, txr); + TASK_INIT(&txr->hn_txeof_task, 0, hn_start_txeof_taskfunc, txr); txr->hn_direct_tx_size = hn_direct_tx_size; if (hv_vmbus_protocal_version >= HV_VMBUS_VERSION_WIN8_1) @@ -2399,7 +2399,7 @@ hn_start_taskfunc(void *xtxr, int pending __unused) } static void -hn_txeof_taskfunc(void *xtxr, int pending __unused) +hn_start_txeof_taskfunc(void *xtxr, int pending __unused) { struct hn_tx_ring *txr = xtxr; @@ -2417,7 +2417,7 @@ hn_stop_tx_tasks(struct hn_softc *sc) for (i = 0; i < sc->hn_tx_ring_cnt; ++i) { struct hn_tx_ring *txr = &sc->hn_tx_ring[i]; - taskqueue_drain(txr->hn_tx_taskq, &txr->hn_start_task); + taskqueue_drain(txr->hn_tx_taskq, &txr->hn_tx_task); taskqueue_drain(txr->hn_tx_taskq, &txr->hn_txeof_task); } } -- cgit v1.2.3 From 5cb0a42c8ca92dd8162d335f25e3926df0b0217c Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Mon, 22 Feb 2016 06:22:47 +0000 Subject: hyperv/hn: Staticize and rename packet TX done function It is only used in hv_netvsc_drv_freebsd.c; and rename it to hn_tx_done() mainly to reserve "xmit" for ifnet.if_transmit implement. While I'm here, remove unapplied comment. 
Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5345 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 1 - sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 15 ++++----------- 2 files changed, 4 insertions(+), 12 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index 042f21f16c71..f5338334f284 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -1086,7 +1086,6 @@ typedef struct hn_softc { extern int hv_promisc_mode; void netvsc_linkstatus_callback(struct hv_device *device_obj, uint32_t status); -void netvsc_xmit_completion(void *context); void hv_nv_on_receive_completion(struct hv_device *device, uint64_t tid, uint32_t status); netvsc_dev *hv_nv_on_device_add(struct hv_device *device, diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index d91ada5f92e9..3f3b6a7a68ac 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -653,17 +653,10 @@ hn_txdesc_hold(struct hn_txdesc *txd) atomic_add_int(&txd->refs, 1); } -/* - * Send completion processing - * - * Note: It looks like offset 0 of buf is reserved to hold the softc - * pointer. The sc pointer is not currently needed in this function, and - * it is not presently populated by the TX function. 
- */ -void -netvsc_xmit_completion(void *context) +static void +hn_tx_done(void *xpkt) { - netvsc_packet *packet = context; + netvsc_packet *packet = xpkt; struct hn_txdesc *txd; struct hn_tx_ring *txr; @@ -905,7 +898,7 @@ done: txd->m = m_head; /* Set the completion routine */ - packet->compl.send.on_send_completion = netvsc_xmit_completion; + packet->compl.send.on_send_completion = hn_tx_done; packet->compl.send.send_completion_context = packet; packet->compl.send.send_completion_tid = (uint64_t)(uintptr_t)txd; -- cgit v1.2.3 From ed3960349bd87a4f3f3d9a1189b5425ec677797e Mon Sep 17 00:00:00 2001 From: Sepherosa Ziehau Date: Mon, 22 Feb 2016 06:28:18 +0000 Subject: hyperv/hn: Add TX method for txeof processing. Preamble to implement ifnet.if_transmit method. Reviewed by: adrian Approved by: adrian (mentor) MFC after: 1 week Sponsored by: Microsoft OSTC Differential Revision: https://reviews.freebsd.org/D5346 --- sys/dev/hyperv/netvsc/hv_net_vsc.h | 3 ++- sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 14 ++++++++------ 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/sys/dev/hyperv/netvsc/hv_net_vsc.h b/sys/dev/hyperv/netvsc/hv_net_vsc.h index f5338334f284..7750ad4887b1 100644 --- a/sys/dev/hyperv/netvsc/hv_net_vsc.h +++ b/sys/dev/hyperv/netvsc/hv_net_vsc.h @@ -1026,9 +1026,10 @@ struct hn_tx_ring { #endif int hn_txdesc_cnt; int hn_txdesc_avail; - int hn_txeof; + int hn_has_txeof; int hn_sched_tx; + void (*hn_txeof)(struct hn_tx_ring *); struct taskqueue *hn_tx_taskq; struct task hn_tx_task; struct task hn_txeof_task; diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 3f3b6a7a68ac..5c4476f03fed 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -664,7 +664,7 @@ hn_tx_done(void *xpkt) packet->compl.send.send_completion_tid; txr = txd->txr; - txr->hn_txeof = 1; + txr->hn_has_txeof = 1; hn_txdesc_put(txr, txd); } @@ -684,11 
+684,11 @@ netvsc_channel_rollup(struct hv_device *device_ctx) } #endif - if (!txr->hn_txeof) + if (!txr->hn_has_txeof) return; - txr->hn_txeof = 0; - hn_start_txeof(txr); + txr->hn_has_txeof = 0; + txr->hn_txeof(txr); } /* @@ -976,12 +976,12 @@ again: * commands to run? Ask netvsc_channel_rollup() * to kick start later. */ - txr->hn_txeof = 1; + txr->hn_has_txeof = 1; if (!send_failed) { txr->hn_send_failed++; send_failed = 1; /* - * Try sending again after set hn_txeof; + * Try sending again after set hn_has_txeof; * in case that we missed the last * netvsc_channel_rollup(). */ @@ -2111,6 +2111,8 @@ hn_create_tx_ring(struct hn_softc *sc, int id) */ txr->hn_sched_tx = 1; + txr->hn_txeof = hn_start_txeof; /* TODO: if_transmit */ + parent_dtag = bus_get_dma_tag(sc->hn_dev); /* DMA tag for RNDIS messages. */ -- cgit v1.2.3 From a1e1814d763ae0dd1c50a85bbd6884969ed26126 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Mon, 22 Feb 2016 09:02:20 +0000 Subject: As is included from , there is no need to include it explicitly when is already included. 
Reviewed by: alc, kib Differential Revision: https://reviews.freebsd.org/D5373 --- share/man/man9/bios.9 | 1 - sys/amd64/cloudabi64/cloudabi64_sysvec.c | 1 - sys/amd64/vmm/amd/npt.c | 2 -- sys/amd64/vmm/amd/svm.c | 1 - sys/arm/annapurna/alpine/alpine_machdep.c | 1 - sys/arm/arm/mp_machdep.c | 1 - sys/arm64/arm64/minidump_machdep.c | 1 - sys/arm64/cloudabi64/cloudabi64_sysvec.c | 1 - sys/compat/linuxkpi/common/include/linux/dma-mapping.h | 1 - sys/compat/linuxkpi/common/include/linux/list.h | 1 - sys/compat/linuxkpi/common/src/linux_compat.c | 1 - sys/compat/linuxkpi/common/src/linux_pci.c | 1 - sys/dev/ce/if_ce.c | 1 - sys/dev/cp/if_cp.c | 1 - sys/dev/drm/drmP.h | 1 - sys/dev/drm2/drmP.h | 1 - sys/dev/fb/machfb.c | 1 - sys/dev/isci/isci_oem_parameters.c | 1 - sys/dev/ntb/if_ntb/if_ntb.c | 1 - sys/dev/ntb/ntb_hw/ntb_hw.c | 1 - sys/dev/pms/freebsd/driver/ini/src/agtiapi.c | 1 - sys/dev/rt/if_rt.c | 1 - sys/dev/siba/siba_pcib.c | 1 - sys/dev/vt/hw/efifb/efifb.c | 1 - sys/i386/bios/mca_machdep.c | 1 - sys/i386/pci/pci_cfgreg.c | 1 - sys/mips/adm5120/admpci.c | 1 - sys/mips/atheros/ar71xx_fixup.c | 1 - sys/mips/atheros/ar71xx_pci.c | 1 - sys/mips/atheros/ar71xx_spi.c | 1 - sys/mips/atheros/ar724x_pci.c | 1 - sys/mips/atheros/if_arge.c | 1 - sys/mips/atheros/qca955x_pci.c | 1 - sys/mips/cavium/cvmx_config.h | 1 - sys/mips/cavium/octopci.c | 1 - sys/mips/idt/idtpci.c | 1 - sys/mips/malta/gt_pci.c | 1 - sys/mips/mips/minidump_machdep.c | 1 - sys/mips/mips/nexus.c | 1 - sys/mips/nlm/xlp_simplebus.c | 1 - sys/mips/rt305x/rt305x_pci.c | 1 - sys/mips/sibyte/sb_zbpci.c | 1 - sys/powerpc/aim/slb.c | 1 - sys/powerpc/ofw/ofw_real.c | 1 - sys/powerpc/ofw/rtas.c | 1 - sys/powerpc/powermac/macgpio.c | 1 - sys/powerpc/powermac/macio.c | 1 - sys/powerpc/powermac/platform_powermac.c | 1 - sys/powerpc/powerpc/genassym.c | 1 - sys/powerpc/powerpc/trap.c | 1 - sys/powerpc/ps3/if_glc.c | 1 - sys/powerpc/ps3/platform_ps3.c | 1 - sys/powerpc/ps3/ps3_syscons.c | 1 - sys/powerpc/ps3/ps3bus.c | 
1 - sys/powerpc/ps3/ps3cdrom.c | 1 - sys/powerpc/ps3/ps3disk.c | 1 - sys/powerpc/pseries/platform_chrp.c | 1 - sys/powerpc/psim/iobus.c | 1 - sys/sparc64/pci/fire.c | 1 - sys/sparc64/sparc64/iommu.c | 1 - sys/sparc64/sparc64/tlb.c | 1 - sys/x86/acpica/acpi_wakeup.c | 1 - sys/x86/x86/nexus.c | 1 - 63 files changed, 64 deletions(-) diff --git a/share/man/man9/bios.9 b/share/man/man9/bios.9 index 48bbe16c0fcf..d5e764b06cfd 100644 --- a/share/man/man9/bios.9 +++ b/share/man/man9/bios.9 @@ -38,7 +38,6 @@ .In vm/vm.h .In vm/pmap.h .In machine/param.h -.In machine/pmap.h .In machine/pc/bios.h .Ft uint32_t .Fn bios_sigsearch "uint32_t start" "u_char *sig" "int siglen" "int paralen" "int sigofs" diff --git a/sys/amd64/cloudabi64/cloudabi64_sysvec.c b/sys/amd64/cloudabi64/cloudabi64_sysvec.c index e6e680bb512d..30a2bbf78498 100644 --- a/sys/amd64/cloudabi64/cloudabi64_sysvec.c +++ b/sys/amd64/cloudabi64/cloudabi64_sysvec.c @@ -36,7 +36,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/amd64/vmm/amd/npt.c b/sys/amd64/vmm/amd/npt.c index bebb4d587628..e1c1b79e1b38 100644 --- a/sys/amd64/vmm/amd/npt.c +++ b/sys/amd64/vmm/amd/npt.c @@ -36,8 +36,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include - #include "npt.h" SYSCTL_DECL(_hw_vmm); diff --git a/sys/amd64/vmm/amd/svm.c b/sys/amd64/vmm/amd/svm.c index ca5141a1a782..bd70d1135b44 100644 --- a/sys/amd64/vmm/amd/svm.c +++ b/sys/amd64/vmm/amd/svm.c @@ -41,7 +41,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/arm/annapurna/alpine/alpine_machdep.c b/sys/arm/annapurna/alpine/alpine_machdep.c index dc6d3a4245db..54e26bf643e7 100644 --- a/sys/arm/annapurna/alpine/alpine_machdep.c +++ b/sys/arm/annapurna/alpine/alpine_machdep.c @@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$"); #include #include /* For trapframe_t, used in */ #include -#include #include #include #include diff --git a/sys/arm/arm/mp_machdep.c b/sys/arm/arm/mp_machdep.c index 
8643860792d5..ed5714eadea2 100644 --- a/sys/arm/arm/mp_machdep.c +++ b/sys/arm/arm/mp_machdep.c @@ -50,7 +50,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c index 678f1b592574..a02db58f2ae0 100644 --- a/sys/arm64/arm64/minidump_machdep.c +++ b/sys/arm64/arm64/minidump_machdep.c @@ -52,7 +52,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/arm64/cloudabi64/cloudabi64_sysvec.c b/sys/arm64/cloudabi64/cloudabi64_sysvec.c index 17fa0d4f5b62..10b98209a291 100644 --- a/sys/arm64/cloudabi64/cloudabi64_sysvec.c +++ b/sys/arm64/cloudabi64/cloudabi64_sysvec.c @@ -36,7 +36,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h index da08751d31de..f18f53dd654d 100644 --- a/sys/compat/linuxkpi/common/include/linux/dma-mapping.h +++ b/sys/compat/linuxkpi/common/include/linux/dma-mapping.h @@ -47,7 +47,6 @@ #include #include -#include enum dma_data_direction { DMA_BIDIRECTIONAL = 0, diff --git a/sys/compat/linuxkpi/common/include/linux/list.h b/sys/compat/linuxkpi/common/include/linux/list.h index f20c8632ba27..63e8af518621 100644 --- a/sys/compat/linuxkpi/common/include/linux/list.h +++ b/sys/compat/linuxkpi/common/include/linux/list.h @@ -68,7 +68,6 @@ #include #include #include -#include #define prefetch(x) diff --git a/sys/compat/linuxkpi/common/src/linux_compat.c b/sys/compat/linuxkpi/common/src/linux_compat.c index 9c736647bfd6..b8a080151292 100644 --- a/sys/compat/linuxkpi/common/src/linux_compat.c +++ b/sys/compat/linuxkpi/common/src/linux_compat.c @@ -50,7 +50,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/compat/linuxkpi/common/src/linux_pci.c b/sys/compat/linuxkpi/common/src/linux_pci.c index 
74d4f7711297..0214e32bbb8b 100644 --- a/sys/compat/linuxkpi/common/src/linux_pci.c +++ b/sys/compat/linuxkpi/common/src/linux_pci.c @@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/dev/ce/if_ce.c b/sys/dev/ce/if_ce.c index 8ef3e1ef3884..85b9fce33017 100644 --- a/sys/dev/ce/if_ce.c +++ b/sys/dev/ce/if_ce.c @@ -76,7 +76,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR diff --git a/sys/dev/cp/if_cp.c b/sys/dev/cp/if_cp.c index 44791d022101..f1a120f52feb 100644 --- a/sys/dev/cp/if_cp.c +++ b/sys/dev/cp/if_cp.c @@ -67,7 +67,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include /* If we don't have Cronyx's sppp version, we don't have fr support via sppp */ #ifndef PP_FR diff --git a/sys/dev/drm/drmP.h b/sys/dev/drm/drmP.h index dc7d80404e53..33da6ff6e592 100644 --- a/sys/dev/drm/drmP.h +++ b/sys/dev/drm/drmP.h @@ -76,7 +76,6 @@ struct drm_file; #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h index b3633c364913..86c7f94c3c3d 100644 --- a/sys/dev/drm2/drmP.h +++ b/sys/dev/drm2/drmP.h @@ -77,7 +77,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) diff --git a/sys/dev/fb/machfb.c b/sys/dev/fb/machfb.c index 368ff6a6a499..de1e98e9d66e 100644 --- a/sys/dev/fb/machfb.c +++ b/sys/dev/fb/machfb.c @@ -56,7 +56,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/dev/isci/isci_oem_parameters.c b/sys/dev/isci/isci_oem_parameters.c index d17698f1cdec..83234a2cce8c 100644 --- a/sys/dev/isci/isci_oem_parameters.c +++ b/sys/dev/isci/isci_oem_parameters.c @@ -34,7 +34,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git 
a/sys/dev/ntb/if_ntb/if_ntb.c b/sys/dev/ntb/if_ntb/if_ntb.c index b28502f1d832..6684edd7d2f7 100644 --- a/sys/dev/ntb/if_ntb/if_ntb.c +++ b/sys/dev/ntb/if_ntb/if_ntb.c @@ -57,7 +57,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/dev/ntb/ntb_hw/ntb_hw.c b/sys/dev/ntb/ntb_hw/ntb_hw.c index 90b27a3f8b1f..cb8f27cd46ec 100644 --- a/sys/dev/ntb/ntb_hw/ntb_hw.c +++ b/sys/dev/ntb/ntb_hw/ntb_hw.c @@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c index 0a500cb23bb6..fcdf6a39ecf8 100644 --- a/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c +++ b/sys/dev/pms/freebsd/driver/ini/src/agtiapi.c @@ -43,7 +43,6 @@ __FBSDID("$FreeBSD$"); #include #include // 1. for vtophys #include // 2. for vtophys -#include // 3. for vtophys (yes, three) #include // For pci_get macros #include #include diff --git a/sys/dev/rt/if_rt.c b/sys/dev/rt/if_rt.c index 3d3a3f35623a..8057b2c6dcb4 100644 --- a/sys/dev/rt/if_rt.c +++ b/sys/dev/rt/if_rt.c @@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/dev/siba/siba_pcib.c b/sys/dev/siba/siba_pcib.c index cd755c030187..5780e62e8f29 100644 --- a/sys/dev/siba/siba_pcib.c +++ b/sys/dev/siba/siba_pcib.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/dev/vt/hw/efifb/efifb.c b/sys/dev/vt/hw/efifb/efifb.c index 4184f776271d..bd983e37c686 100644 --- a/sys/dev/vt/hw/efifb/efifb.c +++ b/sys/dev/vt/hw/efifb/efifb.c @@ -43,7 +43,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/i386/bios/mca_machdep.c b/sys/i386/bios/mca_machdep.c index 4b78c13693a9..e511f3ff786c 100644 --- a/sys/i386/bios/mca_machdep.c +++ b/sys/i386/bios/mca_machdep.c @@ -33,7 +33,6 @@ __FBSDID("$FreeBSD$"); #include #include 
#include -#include #include #include #include diff --git a/sys/i386/pci/pci_cfgreg.c b/sys/i386/pci/pci_cfgreg.c index 2716a7a23ce6..288bcb9dd8fd 100644 --- a/sys/i386/pci/pci_cfgreg.c +++ b/sys/i386/pci/pci_cfgreg.c @@ -51,7 +51,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #ifdef XBOX #include diff --git a/sys/mips/adm5120/admpci.c b/sys/mips/adm5120/admpci.c index 9b36d37f3f15..0fb0b54ca5cd 100644 --- a/sys/mips/adm5120/admpci.c +++ b/sys/mips/adm5120/admpci.c @@ -79,7 +79,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/atheros/ar71xx_fixup.c b/sys/mips/atheros/ar71xx_fixup.c index 8ab88559f15c..27747bdb4892 100644 --- a/sys/mips/atheros/ar71xx_fixup.c +++ b/sys/mips/atheros/ar71xx_fixup.c @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/atheros/ar71xx_pci.c b/sys/mips/atheros/ar71xx_pci.c index 183f8841fee7..f8fc255f1aec 100644 --- a/sys/mips/atheros/ar71xx_pci.c +++ b/sys/mips/atheros/ar71xx_pci.c @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/atheros/ar71xx_spi.c b/sys/mips/atheros/ar71xx_spi.c index a7fc9935d0ad..943a9d8ab9a8 100644 --- a/sys/mips/atheros/ar71xx_spi.c +++ b/sys/mips/atheros/ar71xx_spi.c @@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/atheros/ar724x_pci.c b/sys/mips/atheros/ar724x_pci.c index 3b01801d04a9..12419d16ad02 100644 --- a/sys/mips/atheros/ar724x_pci.c +++ b/sys/mips/atheros/ar724x_pci.c @@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/atheros/if_arge.c b/sys/mips/atheros/if_arge.c index 792ca5d2e41a..b648eea2cd36 100644 --- a/sys/mips/atheros/if_arge.c +++ b/sys/mips/atheros/if_arge.c @@ -65,7 +65,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git 
a/sys/mips/atheros/qca955x_pci.c b/sys/mips/atheros/qca955x_pci.c index 626ca377dfc9..b02517d3d3ae 100644 --- a/sys/mips/atheros/qca955x_pci.c +++ b/sys/mips/atheros/qca955x_pci.c @@ -49,7 +49,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/cavium/cvmx_config.h b/sys/mips/cavium/cvmx_config.h index aed3f737ce07..e1976608e54d 100644 --- a/sys/mips/cavium/cvmx_config.h +++ b/sys/mips/cavium/cvmx_config.h @@ -50,7 +50,6 @@ #include #include -#include #include #define asm __asm diff --git a/sys/mips/cavium/octopci.c b/sys/mips/cavium/octopci.c index 17a63420f70d..25bf3b879dd5 100644 --- a/sys/mips/cavium/octopci.c +++ b/sys/mips/cavium/octopci.c @@ -46,7 +46,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/idt/idtpci.c b/sys/mips/idt/idtpci.c index 46c276ae329e..98d199697c71 100644 --- a/sys/mips/idt/idtpci.c +++ b/sys/mips/idt/idtpci.c @@ -80,7 +80,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/malta/gt_pci.c b/sys/mips/malta/gt_pci.c index 5262e66548d2..946c956504f1 100644 --- a/sys/mips/malta/gt_pci.c +++ b/sys/mips/malta/gt_pci.c @@ -59,7 +59,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include diff --git a/sys/mips/mips/minidump_machdep.c b/sys/mips/mips/minidump_machdep.c index 2122e00526f8..d9e3b47c555e 100644 --- a/sys/mips/mips/minidump_machdep.c +++ b/sys/mips/mips/minidump_machdep.c @@ -39,7 +39,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/mips/mips/nexus.c b/sys/mips/mips/nexus.c index ba7db318df88..88a1c20b4c1a 100644 --- a/sys/mips/mips/nexus.c +++ b/sys/mips/mips/nexus.c @@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/nlm/xlp_simplebus.c b/sys/mips/nlm/xlp_simplebus.c index 790955bb70f6..353cf61e47b1 100644 --- a/sys/mips/nlm/xlp_simplebus.c +++ 
b/sys/mips/nlm/xlp_simplebus.c @@ -41,7 +41,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/mips/rt305x/rt305x_pci.c b/sys/mips/rt305x/rt305x_pci.c index 68a9d8679aef..605a83f8b241 100644 --- a/sys/mips/rt305x/rt305x_pci.c +++ b/sys/mips/rt305x/rt305x_pci.c @@ -54,7 +54,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/sibyte/sb_zbpci.c b/sys/mips/sibyte/sb_zbpci.c index df3c4b8ee64b..e3d7ddeff374 100644 --- a/sys/mips/sibyte/sb_zbpci.c +++ b/sys/mips/sibyte/sb_zbpci.c @@ -44,7 +44,6 @@ #include #include -#include #include #include diff --git a/sys/powerpc/aim/slb.c b/sys/powerpc/aim/slb.c index aa6a21438d7e..72a595be6c70 100644 --- a/sys/powerpc/aim/slb.c +++ b/sys/powerpc/aim/slb.c @@ -44,7 +44,6 @@ #include #include -#include #include uintptr_t moea64_get_unique_vsid(void); diff --git a/sys/powerpc/ofw/ofw_real.c b/sys/powerpc/ofw/ofw_real.c index 86c8afdbb3b4..ddb7d74869be 100644 --- a/sys/powerpc/ofw/ofw_real.c +++ b/sys/powerpc/ofw/ofw_real.c @@ -71,7 +71,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/powerpc/ofw/rtas.c b/sys/powerpc/ofw/rtas.c index 5dff8efcdac2..6767c0084814 100644 --- a/sys/powerpc/ofw/rtas.c +++ b/sys/powerpc/ofw/rtas.c @@ -41,7 +41,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/powerpc/powermac/macgpio.c b/sys/powerpc/powermac/macgpio.c index 8268038b0d90..6b67514f1f3b 100644 --- a/sys/powerpc/powermac/macgpio.c +++ b/sys/powerpc/powermac/macgpio.c @@ -42,7 +42,6 @@ #include #include -#include #include #include diff --git a/sys/powerpc/powermac/macio.c b/sys/powerpc/powermac/macio.c index 669e907d0772..285d62744c47 100644 --- a/sys/powerpc/powermac/macio.c +++ b/sys/powerpc/powermac/macio.c @@ -44,7 +44,6 @@ #include #include -#include #include #include diff --git a/sys/powerpc/powermac/platform_powermac.c 
b/sys/powerpc/powermac/platform_powermac.c index f6e9d9932c52..34aacea9b373 100644 --- a/sys/powerpc/powermac/platform_powermac.c +++ b/sys/powerpc/powermac/platform_powermac.c @@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$"); #include /* For save_fpu() */ #include #include -#include #include #include #include diff --git a/sys/powerpc/powerpc/genassym.c b/sys/powerpc/powerpc/genassym.c index 44c0280de016..1a8cb560d187 100644 --- a/sys/powerpc/powerpc/genassym.c +++ b/sys/powerpc/powerpc/genassym.c @@ -52,7 +52,6 @@ #include #include -#include #include #include diff --git a/sys/powerpc/powerpc/trap.c b/sys/powerpc/powerpc/trap.c index d4dac2ee278a..62d91a677973 100644 --- a/sys/powerpc/powerpc/trap.c +++ b/sys/powerpc/powerpc/trap.c @@ -68,7 +68,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/powerpc/ps3/if_glc.c b/sys/powerpc/ps3/if_glc.c index a22429744158..3b13ac0898d4 100644 --- a/sys/powerpc/ps3/if_glc.c +++ b/sys/powerpc/ps3/if_glc.c @@ -51,7 +51,6 @@ #include #include #include -#include #include #include #include diff --git a/sys/powerpc/ps3/platform_ps3.c b/sys/powerpc/ps3/platform_ps3.c index 4afa2dcd443e..c36603d43605 100644 --- a/sys/powerpc/ps3/platform_ps3.c +++ b/sys/powerpc/ps3/platform_ps3.c @@ -44,7 +44,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/powerpc/ps3/ps3_syscons.c b/sys/powerpc/ps3/ps3_syscons.c index 4edf56eb513f..597a3b62c7c8 100644 --- a/sys/powerpc/ps3/ps3_syscons.c +++ b/sys/powerpc/ps3/ps3_syscons.c @@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/sys/powerpc/ps3/ps3bus.c b/sys/powerpc/ps3/ps3bus.c index 15817fdad6e9..bbb40c8fe5d6 100644 --- a/sys/powerpc/ps3/ps3bus.c +++ b/sys/powerpc/ps3/ps3bus.c @@ -43,7 +43,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include "ps3bus.h" diff --git a/sys/powerpc/ps3/ps3cdrom.c b/sys/powerpc/ps3/ps3cdrom.c index 
afbe0f7d9a7d..841217a44c73 100644 --- a/sys/powerpc/ps3/ps3cdrom.c +++ b/sys/powerpc/ps3/ps3cdrom.c @@ -46,7 +46,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/powerpc/ps3/ps3disk.c b/sys/powerpc/ps3/ps3disk.c index d4ac7fa35d9b..d68caea471e5 100644 --- a/sys/powerpc/ps3/ps3disk.c +++ b/sys/powerpc/ps3/ps3disk.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/powerpc/pseries/platform_chrp.c b/sys/powerpc/pseries/platform_chrp.c index ba43783e7b77..17916edfbb0b 100644 --- a/sys/powerpc/pseries/platform_chrp.c +++ b/sys/powerpc/pseries/platform_chrp.c @@ -42,7 +42,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #include diff --git a/sys/powerpc/psim/iobus.c b/sys/powerpc/psim/iobus.c index 21f53f1f11d4..741b62aae47f 100644 --- a/sys/powerpc/psim/iobus.c +++ b/sys/powerpc/psim/iobus.c @@ -50,7 +50,6 @@ #include #include #include -#include #include diff --git a/sys/sparc64/pci/fire.c b/sys/sparc64/pci/fire.c index c2d22853e4c5..e06ad50958e9 100644 --- a/sys/sparc64/pci/fire.c +++ b/sys/sparc64/pci/fire.c @@ -69,7 +69,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/sparc64/sparc64/iommu.c b/sys/sparc64/sparc64/iommu.c index 42aa258e88e1..83e03d2ee522 100644 --- a/sys/sparc64/sparc64/iommu.c +++ b/sys/sparc64/sparc64/iommu.c @@ -101,7 +101,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/sparc64/sparc64/tlb.c b/sys/sparc64/sparc64/tlb.c index 70b28239d4f8..8adffc648331 100644 --- a/sys/sparc64/sparc64/tlb.c +++ b/sys/sparc64/sparc64/tlb.c @@ -41,7 +41,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/x86/acpica/acpi_wakeup.c b/sys/x86/acpica/acpi_wakeup.c index bc1c55caf955..e05d54298ee0 100644 --- a/sys/x86/acpica/acpi_wakeup.c +++ 
b/sys/x86/acpica/acpi_wakeup.c @@ -56,7 +56,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/x86/x86/nexus.c b/sys/x86/x86/nexus.c index 9f68e506e2ee..ab006136791d 100644 --- a/sys/x86/x86/nexus.c +++ b/sys/x86/x86/nexus.c @@ -63,7 +63,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include -- cgit v1.2.3 From d6849317c579f3a976e8ed3a1708e1e17c813ad9 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Mon, 22 Feb 2016 09:04:36 +0000 Subject: As is included from , there is no need to include it explicitly when is already included. Reviewed by: alc, kib Differential Revision: https://reviews.freebsd.org/D5378 --- lib/libvmmapi/vmmapi.c | 1 - share/man/man9/bios.9 | 1 - sys/arm/arm/debug_monitor.c | 1 - sys/arm64/arm64/debug_monitor.c | 1 - sys/dev/drm/drmP.h | 1 - sys/dev/drm2/drmP.h | 1 - sys/mips/mips/stack_machdep.c | 1 - sys/mips/nlm/cms.c | 1 - sys/mips/nlm/dev/net/xlpge.c | 1 - sys/mips/rmi/dev/nlge/if_nlge.c | 1 - sys/mips/rmi/fmn.c | 1 - sys/mips/rmi/iodi.c | 1 - 12 files changed, 12 deletions(-) diff --git a/lib/libvmmapi/vmmapi.c b/lib/libvmmapi/vmmapi.c index fb8eb787a1d6..3a6f210e805d 100644 --- a/lib/libvmmapi/vmmapi.c +++ b/lib/libvmmapi/vmmapi.c @@ -38,7 +38,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include diff --git a/share/man/man9/bios.9 b/share/man/man9/bios.9 index d5e764b06cfd..e6774dfbc0a6 100644 --- a/share/man/man9/bios.9 +++ b/share/man/man9/bios.9 @@ -37,7 +37,6 @@ .In sys/param.h .In vm/vm.h .In vm/pmap.h -.In machine/param.h .In machine/pc/bios.h .Ft uint32_t .Fn bios_sigsearch "uint32_t start" "u_char *sig" "int siglen" "int paralen" "int sigofs" diff --git a/sys/arm/arm/debug_monitor.c b/sys/arm/arm/debug_monitor.c index eaf88e9d1f07..ab9b46bddcc9 100644 --- a/sys/arm/arm/debug_monitor.c +++ b/sys/arm/arm/debug_monitor.c @@ -43,7 +43,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git 
a/sys/arm64/arm64/debug_monitor.c b/sys/arm64/arm64/debug_monitor.c index 50d663da4b09..46167df88084 100644 --- a/sys/arm64/arm64/debug_monitor.c +++ b/sys/arm64/arm64/debug_monitor.c @@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/dev/drm/drmP.h b/sys/dev/drm/drmP.h index 33da6ff6e592..8c4ece5c19e7 100644 --- a/sys/dev/drm/drmP.h +++ b/sys/dev/drm/drmP.h @@ -75,7 +75,6 @@ struct drm_file; #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) diff --git a/sys/dev/drm2/drmP.h b/sys/dev/drm2/drmP.h index 86c7f94c3c3d..ae12144cf01f 100644 --- a/sys/dev/drm2/drmP.h +++ b/sys/dev/drm2/drmP.h @@ -76,7 +76,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include #if defined(__i386__) || defined(__amd64__) diff --git a/sys/mips/mips/stack_machdep.c b/sys/mips/mips/stack_machdep.c index 9b724cb3fc90..0f7c4bb1840f 100644 --- a/sys/mips/mips/stack_machdep.c +++ b/sys/mips/mips/stack_machdep.c @@ -35,7 +35,6 @@ __FBSDID("$FreeBSD$"); #include -#include #include #include diff --git a/sys/mips/nlm/cms.c b/sys/mips/nlm/cms.c index a4b724489dbe..02431d6696e4 100644 --- a/sys/mips/nlm/cms.c +++ b/sys/mips/nlm/cms.c @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/mips/nlm/dev/net/xlpge.c b/sys/mips/nlm/dev/net/xlpge.c index aaa637990f53..4f3aeb0d0416 100644 --- a/sys/mips/nlm/dev/net/xlpge.c +++ b/sys/mips/nlm/dev/net/xlpge.c @@ -72,7 +72,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include /* for DELAY */ #include diff --git a/sys/mips/rmi/dev/nlge/if_nlge.c b/sys/mips/rmi/dev/nlge/if_nlge.c index 76a63c33d04b..9fa95c7f6a78 100644 --- a/sys/mips/rmi/dev/nlge/if_nlge.c +++ b/sys/mips/rmi/dev/nlge/if_nlge.c @@ -94,7 +94,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include /* for DELAY */ #include diff --git a/sys/mips/rmi/fmn.c 
b/sys/mips/rmi/fmn.c index 4c0d5df79905..b9c596592f4c 100644 --- a/sys/mips/rmi/fmn.c +++ b/sys/mips/rmi/fmn.c @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/mips/rmi/iodi.c b/sys/mips/rmi/iodi.c index c9c5c459e8e4..da0405d3d288 100644 --- a/sys/mips/rmi/iodi.c +++ b/sys/mips/rmi/iodi.c @@ -48,7 +48,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include /* for DELAY */ #include -- cgit v1.2.3 From 35a0bc126056419b34b7bad9a8488ac20dcdbc8e Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Mon, 22 Feb 2016 09:08:04 +0000 Subject: As is included from , there is no need to include it explicitly when is already included. Suggested by: alc Reviewed by: alc Differential Revision: https://reviews.freebsd.org/D5379 --- sys/amd64/amd64/minidump_machdep.c | 1 - sys/amd64/vmm/vmm.c | 1 - sys/arm/arm/genassym.c | 1 - sys/arm/arm/pmap-v6.c | 1 - sys/arm/arm/trap-v6.c | 1 - sys/arm64/arm64/minidump_machdep.c | 1 - sys/arm64/arm64/trap.c | 1 - sys/cddl/dev/dtrace/aarch64/dtrace_isa.c | 1 - sys/cddl/dev/dtrace/arm/dtrace_isa.c | 1 - sys/cddl/dev/dtrace/mips/dtrace_isa.c | 1 - sys/compat/linux/linux_util.h | 1 - sys/compat/svr4/svr4_misc.c | 1 - sys/compat/svr4/svr4_util.h | 1 - sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c | 1 - sys/riscv/riscv/genassym.c | 1 - sys/riscv/riscv/minidump_machdep.c | 1 - sys/riscv/riscv/trap.c | 1 - 17 files changed, 17 deletions(-) diff --git a/sys/amd64/amd64/minidump_machdep.c b/sys/amd64/amd64/minidump_machdep.c index 61b348ea325c..cc32cdc21007 100644 --- a/sys/amd64/amd64/minidump_machdep.c +++ b/sys/amd64/amd64/minidump_machdep.c @@ -46,7 +46,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c index 098705909953..b98152089db7 100644 --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -58,7 +58,6 @@ __FBSDID("$FreeBSD$"); #include 
#include #include -#include #include #include diff --git a/sys/arm/arm/genassym.c b/sys/arm/arm/genassym.c index 7b742bf512bd..f9cb23e810f2 100644 --- a/sys/arm/arm/genassym.c +++ b/sys/arm/arm/genassym.c @@ -40,7 +40,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/arm/arm/pmap-v6.c b/sys/arm/arm/pmap-v6.c index bb1b0ba1e79c..609b291c1f60 100644 --- a/sys/arm/arm/pmap-v6.c +++ b/sys/arm/arm/pmap-v6.c @@ -121,7 +121,6 @@ __FBSDID("$FreeBSD$"); #endif #include -#include #include #include diff --git a/sys/arm/arm/trap-v6.c b/sys/arm/arm/trap-v6.c index 81a6ee428f6f..d0819172a828 100644 --- a/sys/arm/arm/trap-v6.c +++ b/sys/arm/arm/trap-v6.c @@ -58,7 +58,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #ifdef KDB #include diff --git a/sys/arm64/arm64/minidump_machdep.c b/sys/arm64/arm64/minidump_machdep.c index a02db58f2ae0..56d1713c9f1b 100644 --- a/sys/arm64/arm64/minidump_machdep.c +++ b/sys/arm64/arm64/minidump_machdep.c @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); diff --git a/sys/arm64/arm64/trap.c b/sys/arm64/arm64/trap.c index 10e3f416c2a1..1db574273bd0 100644 --- a/sys/arm64/arm64/trap.c +++ b/sys/arm64/arm64/trap.c @@ -52,7 +52,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #ifdef KDTRACE_HOOKS #include diff --git a/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c b/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c index dcda440e8799..3d7e0444e785 100644 --- a/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c +++ b/sys/cddl/dev/dtrace/aarch64/dtrace_isa.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include diff --git a/sys/cddl/dev/dtrace/arm/dtrace_isa.c b/sys/cddl/dev/dtrace/arm/dtrace_isa.c index 8e56a44165d9..9bac37669a84 100644 --- a/sys/cddl/dev/dtrace/arm/dtrace_isa.c +++ b/sys/cddl/dev/dtrace/arm/dtrace_isa.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include 
#include diff --git a/sys/cddl/dev/dtrace/mips/dtrace_isa.c b/sys/cddl/dev/dtrace/mips/dtrace_isa.c index 4fa03d5c7145..c09225c734b9 100644 --- a/sys/cddl/dev/dtrace/mips/dtrace_isa.c +++ b/sys/cddl/dev/dtrace/mips/dtrace_isa.c @@ -44,7 +44,6 @@ #include #include #include -#include #include #include #include diff --git a/sys/compat/linux/linux_util.h b/sys/compat/linux/linux_util.h index a52a7b91d551..7b389a00ff79 100644 --- a/sys/compat/linux/linux_util.h +++ b/sys/compat/linux/linux_util.h @@ -37,7 +37,6 @@ #include #include #include -#include #include #include #include diff --git a/sys/compat/svr4/svr4_misc.c b/sys/compat/svr4/svr4_misc.c index b560764ca8e2..aaed81f42289 100644 --- a/sys/compat/svr4/svr4_misc.c +++ b/sys/compat/svr4/svr4_misc.c @@ -84,7 +84,6 @@ __FBSDID("$FreeBSD$"); #include -#include #include #include #include diff --git a/sys/compat/svr4/svr4_util.h b/sys/compat/svr4/svr4_util.h index 4cbf230cf526..3fc245111283 100644 --- a/sys/compat/svr4/svr4_util.h +++ b/sys/compat/svr4/svr4_util.h @@ -35,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c index 5c4476f03fed..b692ecb4e872 100644 --- a/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c +++ b/sys/dev/hyperv/netvsc/hv_netvsc_drv_freebsd.c @@ -100,7 +100,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include diff --git a/sys/riscv/riscv/genassym.c b/sys/riscv/riscv/genassym.c index bf6c8fb4c851..c5dec83d9695 100644 --- a/sys/riscv/riscv/genassym.c +++ b/sys/riscv/riscv/genassym.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/riscv/riscv/minidump_machdep.c b/sys/riscv/riscv/minidump_machdep.c index ca51cfc77e50..235d50f1d0bd 100644 --- a/sys/riscv/riscv/minidump_machdep.c +++ b/sys/riscv/riscv/minidump_machdep.c @@ -45,7 +45,6 @@ __FBSDID("$FreeBSD$"); #include #include 
#include -#include #include CTASSERT(sizeof(struct kerneldumpheader) == 512); diff --git a/sys/riscv/riscv/trap.c b/sys/riscv/riscv/trap.c index c192b3135feb..7ac30c0b087d 100644 --- a/sys/riscv/riscv/trap.c +++ b/sys/riscv/riscv/trap.c @@ -57,7 +57,6 @@ __FBSDID("$FreeBSD$"); #include #include #include -#include #include #include -- cgit v1.2.3 From b352b10400358a629fa59937c1ccd6f0e68bb196 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Mon, 22 Feb 2016 09:10:23 +0000 Subject: As is included from , there is no need to include it explicitly when is already included. Reviewed by: alc, kib Differential Revision: https://reviews.freebsd.org/D5380 --- sys/amd64/vmm/vmm.c | 1 - sys/arm/mv/mvvar.h | 1 - sys/dev/altera/avgen/altera_avgen.c | 1 - sys/dev/altera/avgen/altera_avgen_fdt.c | 1 - sys/dev/altera/avgen/altera_avgen_nexus.c | 1 - sys/dev/vt/hw/efifb/efifb.c | 1 - 6 files changed, 6 deletions(-) diff --git a/sys/amd64/vmm/vmm.c b/sys/amd64/vmm/vmm.c index b98152089db7..71eeb22a7bf0 100644 --- a/sys/amd64/vmm/vmm.c +++ b/sys/amd64/vmm/vmm.c @@ -53,7 +53,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include #include #include diff --git a/sys/arm/mv/mvvar.h b/sys/arm/mv/mvvar.h index 806d520f258a..898b74e5abeb 100644 --- a/sys/arm/mv/mvvar.h +++ b/sys/arm/mv/mvvar.h @@ -44,7 +44,6 @@ #include #include #include -#include #include diff --git a/sys/dev/altera/avgen/altera_avgen.c b/sys/dev/altera/avgen/altera_avgen.c index 98c5d7722505..f880e8608dcd 100644 --- a/sys/dev/altera/avgen/altera_avgen.c +++ b/sys/dev/altera/avgen/altera_avgen.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include diff --git a/sys/dev/altera/avgen/altera_avgen_fdt.c b/sys/dev/altera/avgen/altera_avgen_fdt.c index d87027d2effd..09fba3960edf 100644 --- a/sys/dev/altera/avgen/altera_avgen_fdt.c +++ b/sys/dev/altera/avgen/altera_avgen_fdt.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include diff --git 
a/sys/dev/altera/avgen/altera_avgen_nexus.c b/sys/dev/altera/avgen/altera_avgen_nexus.c index 45e263c30420..c3daab736623 100644 --- a/sys/dev/altera/avgen/altera_avgen_nexus.c +++ b/sys/dev/altera/avgen/altera_avgen_nexus.c @@ -47,7 +47,6 @@ __FBSDID("$FreeBSD$"); #include #include -#include #include diff --git a/sys/dev/vt/hw/efifb/efifb.c b/sys/dev/vt/hw/efifb/efifb.c index bd983e37c686..27c10eeda692 100644 --- a/sys/dev/vt/hw/efifb/efifb.c +++ b/sys/dev/vt/hw/efifb/efifb.c @@ -39,7 +39,6 @@ __FBSDID("$FreeBSD$"); #include "opt_platform.h" #include -#include #include #include #include -- cgit v1.2.3 From 7758916f4401f98c1812ecbfcaf2f8eef1670177 Mon Sep 17 00:00:00 2001 From: Svatopluk Kraus Date: Mon, 22 Feb 2016 11:47:28 +0000 Subject: Move ARM_L2_PIPT option to std.armv6 for all armv6 platforms. Only L2 PIPT cache is supported for __ARM_ARCH >= 6. In fact, this is just a pure proclamation as this option is used only in armv4 specific files now. --- sys/arm/allwinner/a20/std.a20 | 2 -- sys/arm/allwinner/std.a10 | 2 -- sys/arm/altera/socfpga/std.socfpga | 2 -- sys/arm/amlogic/aml8726/std.aml8726 | 2 -- sys/arm/annapurna/alpine/std.alpine | 2 -- sys/arm/broadcom/bcm2835/std.bcm2836 | 1 - sys/arm/conf/std.armv6 | 2 ++ sys/arm/freescale/imx/std.imx51 | 1 - sys/arm/freescale/imx/std.imx53 | 1 - sys/arm/freescale/imx/std.imx6 | 1 - sys/arm/freescale/vybrid/std.vybrid | 2 -- sys/arm/mv/armadaxp/std.armadaxp | 2 -- sys/arm/qemu/std.virt | 1 - sys/arm/rockchip/std.rk30xx | 2 -- sys/arm/samsung/exynos/std.exynos5250 | 2 -- sys/arm/samsung/exynos/std.exynos5420 | 2 -- sys/arm/ti/am335x/std.am335x | 2 -- sys/arm/ti/omap4/std.omap4 | 2 -- sys/arm/xilinx/std.zynq7 | 2 -- 19 files changed, 2 insertions(+), 31 deletions(-) diff --git a/sys/arm/allwinner/a20/std.a20 b/sys/arm/allwinner/a20/std.a20 index cd72d2bae6b5..53cef037e460 100644 --- a/sys/arm/allwinner/a20/std.a20 +++ b/sys/arm/allwinner/a20/std.a20 @@ -8,8 +8,6 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions 
KERNVIRTADDR=0xc0200000 options KERNVIRTADDR=0xc0200000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/allwinner/std.a10 b/sys/arm/allwinner/std.a10 index 24d359116832..8b8e2a3c14ae 100644 --- a/sys/arm/allwinner/std.a10 +++ b/sys/arm/allwinner/std.a10 @@ -8,7 +8,5 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0200000 options KERNVIRTADDR=0xc0200000 -options ARM_L2_PIPT - files "../allwinner/files.allwinner" files "../allwinner/files.a10" diff --git a/sys/arm/altera/socfpga/std.socfpga b/sys/arm/altera/socfpga/std.socfpga index 8915fee9c2c8..687c5a7f7a22 100644 --- a/sys/arm/altera/socfpga/std.socfpga +++ b/sys/arm/altera/socfpga/std.socfpga @@ -7,8 +7,6 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0f00000 options KERNVIRTADDR=0xc0f00000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/amlogic/aml8726/std.aml8726 b/sys/arm/amlogic/aml8726/std.aml8726 index 61b515f75f9c..bd1c12e1c417 100644 --- a/sys/arm/amlogic/aml8726/std.aml8726 +++ b/sys/arm/amlogic/aml8726/std.aml8726 @@ -17,8 +17,6 @@ device fdt_pinctrl files "../amlogic/aml8726/files.aml8726" -options ARM_L2_PIPT - # Set all global interrupts to be edge triggered, active high. 
options GIC_DEFAULT_ICFGR_INIT=0xffffffff diff --git a/sys/arm/annapurna/alpine/std.alpine b/sys/arm/annapurna/alpine/std.alpine index f66118b127e3..002b4ce5fa78 100644 --- a/sys/arm/annapurna/alpine/std.alpine +++ b/sys/arm/annapurna/alpine/std.alpine @@ -10,8 +10,6 @@ options KERNVIRTADDR=0xa0200000 makeoptions KERNBASE=0xa0000000 options KERNBASE=0xa0000000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/broadcom/bcm2835/std.bcm2836 b/sys/arm/broadcom/bcm2835/std.bcm2836 index 862be752338a..789450f4fab9 100644 --- a/sys/arm/broadcom/bcm2835/std.bcm2836 +++ b/sys/arm/broadcom/bcm2835/std.bcm2836 @@ -5,7 +5,6 @@ cpu CPU_CORTEXA makeoptions CONF_CFLAGS="-march=armv7a" options SOC_BCM2836 -options ARM_L2_PIPT options IPI_IRQ_START=76 files "../broadcom/bcm2835/files.bcm2836" diff --git a/sys/arm/conf/std.armv6 b/sys/arm/conf/std.armv6 index 35f60a3345e7..142c7d3f7dfd 100644 --- a/sys/arm/conf/std.armv6 +++ b/sys/arm/conf/std.armv6 @@ -2,6 +2,8 @@ # # $FreeBSD$ +options ARM_L2_PIPT # Only L2 PIPT is supported + options PREEMPTION # Enable kernel thread preemption options INET # InterNETworking options INET6 # IPv6 communications protocols diff --git a/sys/arm/freescale/imx/std.imx51 b/sys/arm/freescale/imx/std.imx51 index 1c375fcc8157..4f9ac1454b9a 100644 --- a/sys/arm/freescale/imx/std.imx51 +++ b/sys/arm/freescale/imx/std.imx51 @@ -2,7 +2,6 @@ machine arm armv6 cpu CPU_CORTEXA makeoptions CONF_CFLAGS="-march=armv7a" -options ARM_L2_PIPT options KERNVIRTADDR=0xc0100000 makeoptions KERNVIRTADDR=0xc0100000 diff --git a/sys/arm/freescale/imx/std.imx53 b/sys/arm/freescale/imx/std.imx53 index 1c375fcc8157..4f9ac1454b9a 100644 --- a/sys/arm/freescale/imx/std.imx53 +++ b/sys/arm/freescale/imx/std.imx53 @@ -2,7 +2,6 @@ machine arm armv6 cpu CPU_CORTEXA makeoptions CONF_CFLAGS="-march=armv7a" -options ARM_L2_PIPT options KERNVIRTADDR=0xc0100000 makeoptions KERNVIRTADDR=0xc0100000 diff --git a/sys/arm/freescale/imx/std.imx6 
b/sys/arm/freescale/imx/std.imx6 index c7232a5c76b0..5c6c39c2f6b7 100644 --- a/sys/arm/freescale/imx/std.imx6 +++ b/sys/arm/freescale/imx/std.imx6 @@ -2,7 +2,6 @@ machine arm armv6 cpu CPU_CORTEXA makeoptions CONF_CFLAGS="-march=armv7a" -options ARM_L2_PIPT options KERNVIRTADDR = 0xc2000000 makeoptions KERNVIRTADDR = 0xc2000000 diff --git a/sys/arm/freescale/vybrid/std.vybrid b/sys/arm/freescale/vybrid/std.vybrid index e45dd3fd4ebb..6baf9489ae0b 100644 --- a/sys/arm/freescale/vybrid/std.vybrid +++ b/sys/arm/freescale/vybrid/std.vybrid @@ -7,6 +7,4 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0100000 options KERNVIRTADDR=0xc0100000 -options ARM_L2_PIPT - files "../freescale/vybrid/files.vybrid" diff --git a/sys/arm/mv/armadaxp/std.armadaxp b/sys/arm/mv/armadaxp/std.armadaxp index 23e5423ff304..84361f53eb27 100644 --- a/sys/arm/mv/armadaxp/std.armadaxp +++ b/sys/arm/mv/armadaxp/std.armadaxp @@ -2,5 +2,3 @@ makeoptions KERNVIRTADDR=0xc0200000 options KERNVIRTADDR=0xc0200000 - -options ARM_L2_PIPT diff --git a/sys/arm/qemu/std.virt b/sys/arm/qemu/std.virt index 3adef1e0a355..858e195914ce 100644 --- a/sys/arm/qemu/std.virt +++ b/sys/arm/qemu/std.virt @@ -2,7 +2,6 @@ machine arm armv6 cpu CPU_CORTEXA makeoptions CONF_CFLAGS="-march=armv7a" -options ARM_L2_PIPT options KERNVIRTADDR = 0xc1000000 makeoptions KERNVIRTADDR = 0xc1000000 diff --git a/sys/arm/rockchip/std.rk30xx b/sys/arm/rockchip/std.rk30xx index 6d16291cb4da..431526b1c36e 100644 --- a/sys/arm/rockchip/std.rk30xx +++ b/sys/arm/rockchip/std.rk30xx @@ -8,8 +8,6 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0400000 options KERNVIRTADDR=0xc0400000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/samsung/exynos/std.exynos5250 b/sys/arm/samsung/exynos/std.exynos5250 index b4d57b76301f..52e450823d74 100644 --- a/sys/arm/samsung/exynos/std.exynos5250 +++ b/sys/arm/samsung/exynos/std.exynos5250 @@ -7,8 +7,6 @@ makeoptions 
CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0f00000 options KERNVIRTADDR=0xc0f00000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/samsung/exynos/std.exynos5420 b/sys/arm/samsung/exynos/std.exynos5420 index b4d57b76301f..52e450823d74 100644 --- a/sys/arm/samsung/exynos/std.exynos5420 +++ b/sys/arm/samsung/exynos/std.exynos5420 @@ -7,8 +7,6 @@ makeoptions CONF_CFLAGS="-march=armv7a" makeoptions KERNVIRTADDR=0xc0f00000 options KERNVIRTADDR=0xc0f00000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 diff --git a/sys/arm/ti/am335x/std.am335x b/sys/arm/ti/am335x/std.am335x index 994be2c97340..b5565c29df92 100644 --- a/sys/arm/ti/am335x/std.am335x +++ b/sys/arm/ti/am335x/std.am335x @@ -7,5 +7,3 @@ options KERNVIRTADDR=0xc0200000 # Used in ldscript.arm makeoptions KERNVIRTADDR=0xc0200000 options SOC_TI_AM335X - -options ARM_L2_PIPT diff --git a/sys/arm/ti/omap4/std.omap4 b/sys/arm/ti/omap4/std.omap4 index c8e456b78bc1..8d5764bbf52d 100644 --- a/sys/arm/ti/omap4/std.omap4 +++ b/sys/arm/ti/omap4/std.omap4 @@ -7,5 +7,3 @@ options KERNVIRTADDR=0xc0200000 # Used in ldscript.arm makeoptions KERNVIRTADDR=0xc0200000 options SOC_OMAP4 - -options ARM_L2_PIPT diff --git a/sys/arm/xilinx/std.zynq7 b/sys/arm/xilinx/std.zynq7 index a5c6c577fa51..addf7bcb663c 100644 --- a/sys/arm/xilinx/std.zynq7 +++ b/sys/arm/xilinx/std.zynq7 @@ -12,7 +12,5 @@ files "../xilinx/files.zynq7" options KERNVIRTADDR=0xc0100000 # Used in ldscript.arm makeoptions KERNVIRTADDR=0xc0100000 -options ARM_L2_PIPT - options IPI_IRQ_START=0 options IPI_IRQ_END=15 -- cgit v1.2.3