author     Nathan Whitehorn <nwhitehorn@FreeBSD.org>    2010-12-03 16:37:37 +0000
committer  Nathan Whitehorn <nwhitehorn@FreeBSD.org>    2010-12-03 16:37:37 +0000
commit     e2326639c29931cbfe3a59654e920699220ac3ab (patch)
tree       7a1aea613de8d282341d352ea188afde81d4ad75
parent     b010326013f0b8cf3e7cd71b8f47a53ac610a260 (diff)
Provide a simple IOMMU framework on PowerPC, which is required to support
PPC hypervisors.
Notes: svn path=/head/; revision=216154
-rw-r--r--  sys/conf/files.powerpc                  1
-rw-r--r--  sys/powerpc/include/bus_dma.h           4
-rw-r--r--  sys/powerpc/powerpc/busdma_machdep.c  198
-rw-r--r--  sys/powerpc/powerpc/iommu_if.m         54
4 files changed, 173 insertions(+), 84 deletions(-)
diff --git a/sys/conf/files.powerpc b/sys/conf/files.powerpc
index d46b39fcf936..83f75c43fb6d 100644
--- a/sys/conf/files.powerpc
+++ b/sys/conf/files.powerpc
@@ -177,6 +177,7 @@ powerpc/powerpc/fuswintr.c standard
powerpc/powerpc/gdb_machdep.c optional gdb
powerpc/powerpc/in_cksum.c optional inet
powerpc/powerpc/intr_machdep.c standard
+powerpc/powerpc/iommu_if.m standard
powerpc/powerpc/mem.c optional mem
powerpc/powerpc/mmu_if.m standard
powerpc/powerpc/mp_machdep.c optional smp
diff --git a/sys/powerpc/include/bus_dma.h b/sys/powerpc/include/bus_dma.h
index d10a0557bc6b..e070a9423815 100644
--- a/sys/powerpc/include/bus_dma.h
+++ b/sys/powerpc/include/bus_dma.h
@@ -30,4 +30,8 @@
#include <sys/bus_dma.h>
+struct device;
+
+int bus_dma_tag_set_iommu(bus_dma_tag_t, struct device *iommu, void *cookie);
+
#endif /* _POWERPC_BUS_DMA_H_ */
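
The hunk above exposes the new hook, bus_dma_tag_set_iommu(), which a bus or bridge driver calls once it knows which IOMMU device sits in front of its children. Below is a minimal sketch of such a call from a bridge attach routine; the driver name exbridge and the softc fields sc_dmat, sc_iommu and sc_dma_window are hypothetical and not part of this commit.

#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>

/*
 * Illustrative only: a bridge attach routine that creates a DMA tag for
 * its children and routes it through the platform IOMMU.  The driver
 * name and softc fields are placeholders.
 */
static int
exbridge_attach(device_t dev)
{
	struct exbridge_softc *sc = device_get_softc(dev);
	int error;

	error = bus_dma_tag_create(bus_get_dma_tag(dev),
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,		/* maxsize */
	    16,				/* nsegments (illustrative) */
	    BUS_SPACE_MAXSIZE,		/* maxsegsize */
	    0, NULL, NULL,		/* flags, lockfunc, lockarg */
	    &sc->sc_dmat);
	if (error != 0)
		return (error);

	/*
	 * sc_iommu is the IOMMU device_t; sc_dma_window is an opaque
	 * per-bus cookie that the IOMMU driver gets back in
	 * IOMMU_MAP()/IOMMU_UNMAP().
	 */
	return (bus_dma_tag_set_iommu(sc->sc_dmat, sc->sc_iommu,
	    sc->sc_dma_window));
}

Once an IOMMU is attached to a tag, the run_filter() and BUS_DMA_COULD_BOUNCE changes in busdma_machdep.c below stop forcing bounce buffers for address-window restrictions and rely on the IOMMU translation instead.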
diff --git a/sys/powerpc/powerpc/busdma_machdep.c b/sys/powerpc/powerpc/busdma_machdep.c
index f66413fddf8d..84e3bc6debe6 100644
--- a/sys/powerpc/powerpc/busdma_machdep.c
+++ b/sys/powerpc/powerpc/busdma_machdep.c
@@ -53,7 +53,9 @@ __FBSDID("$FreeBSD$");
#include <machine/bus.h>
#include <machine/md_var.h>
-#define MAX_BPAGES 8192
+#include "iommu_if.h"
+
+#define MAX_BPAGES MIN(8192, physmem/40)
struct bounce_zone;
@@ -73,8 +75,9 @@ struct bus_dma_tag {
int map_count;
bus_dma_lock_t *lockfunc;
void *lockfuncarg;
- bus_dma_segment_t *segments;
struct bounce_zone *bounce_zone;
+ device_t iommu;
+ void *iommu_cookie;
};
struct bounce_page {
@@ -121,6 +124,8 @@ struct bus_dmamap {
bus_dma_tag_t dmat;
void *buf; /* unmapped buffer pointer */
bus_size_t buflen; /* unmapped buffer length */
+ bus_dma_segment_t *segments;
+ int nsegs;
bus_dmamap_callback_t *callback;
void *callback_arg;
STAILQ_ENTRY(bus_dmamap) links;
@@ -128,7 +133,6 @@ struct bus_dmamap {
static STAILQ_HEAD(, bus_dmamap) bounce_map_waitinglist;
static STAILQ_HEAD(, bus_dmamap) bounce_map_callbacklist;
-static struct bus_dmamap nobounce_dmamap;
static void init_bounce_pages(void *dummy);
static int alloc_bounce_zone(bus_dma_tag_t dmat);
@@ -156,10 +160,14 @@ run_filter(bus_dma_tag_t dmat, bus_addr_t paddr)
retval = 0;
do {
- if (((paddr > dmat->lowaddr && paddr <= dmat->highaddr)
- || ((paddr & (dmat->alignment - 1)) != 0))
- && (dmat->filter == NULL
- || (*dmat->filter)(dmat->filterarg, paddr) != 0))
+ if (dmat->filter == NULL && dmat->iommu == NULL &&
+ paddr > dmat->lowaddr && paddr <= dmat->highaddr)
+ retval = 1;
+ if (dmat->filter == NULL &&
+ (paddr & (dmat->alignment - 1)) != 0)
+ retval = 1;
+ if (dmat->filter != NULL &&
+ (*dmat->filter)(dmat->filterarg, paddr) != 0)
retval = 1;
dmat = dmat->parent;
@@ -258,7 +266,6 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
newtag->lockfunc = dflt_lock;
newtag->lockfuncarg = NULL;
}
- newtag->segments = NULL;
/* Take into account any restrictions imposed by our parent tag */
if (parent != NULL) {
@@ -280,10 +287,14 @@ bus_dma_tag_create(bus_dma_tag_t parent, bus_size_t alignment,
}
if (newtag->parent != NULL)
atomic_add_int(&parent->ref_count, 1);
+ newtag->iommu = parent->iommu;
+ newtag->iommu_cookie = parent->iommu_cookie;
}
- if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem)
- || newtag->alignment > 1)
+ if (newtag->lowaddr < ptoa((vm_paddr_t)Maxmem) && newtag->iommu == NULL)
+ newtag->flags |= BUS_DMA_COULD_BOUNCE;
+
+ if (newtag->alignment > 1)
newtag->flags |= BUS_DMA_COULD_BOUNCE;
if (((newtag->flags & BUS_DMA_COULD_BOUNCE) != 0) &&
@@ -343,8 +354,6 @@ bus_dma_tag_destroy(bus_dma_tag_t dmat)
parent = dmat->parent;
atomic_subtract_int(&dmat->ref_count, 1);
if (dmat->ref_count == 0) {
- if (dmat->segments != NULL)
- free(dmat->segments, M_DEVBUF);
free(dmat, M_DEVBUF);
/*
* Last reference count, so
@@ -372,17 +381,15 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
error = 0;
- if (dmat->segments == NULL) {
- dmat->segments = (bus_dma_segment_t *)malloc(
- sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
- M_NOWAIT);
- if (dmat->segments == NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, ENOMEM);
- return (ENOMEM);
- }
+ *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
+ M_NOWAIT | M_ZERO);
+ if (*mapp == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
}
+
/*
* Bouncing might be required if the driver asks for an active
* exclusion region, a data alignment that is stricter than 1, and/or
@@ -400,14 +407,6 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
bz = dmat->bounce_zone;
- *mapp = (bus_dmamap_t)malloc(sizeof(**mapp), M_DEVBUF,
- M_NOWAIT | M_ZERO);
- if (*mapp == NULL) {
- CTR3(KTR_BUSDMA, "%s: tag %p error %d",
- __func__, dmat, ENOMEM);
- return (ENOMEM);
- }
-
/* Initialize the new map */
STAILQ_INIT(&((*mapp)->bpages));
@@ -437,9 +436,18 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
}
}
bz->map_count++;
- } else {
- *mapp = NULL;
}
+
+ (*mapp)->nsegs = 0;
+ (*mapp)->segments = (bus_dma_segment_t *)malloc(
+ sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
+ M_NOWAIT);
+ if ((*mapp)->segments == NULL) {
+ CTR3(KTR_BUSDMA, "%s: tag %p error %d",
+ __func__, dmat, ENOMEM);
+ return (ENOMEM);
+ }
+
if (error == 0)
dmat->map_count++;
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
@@ -454,7 +462,7 @@ bus_dmamap_create(bus_dma_tag_t dmat, int flags, bus_dmamap_t *mapp)
int
bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
{
- if (map != NULL && map != &nobounce_dmamap) {
+ if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
if (STAILQ_FIRST(&map->bpages) != NULL) {
CTR3(KTR_BUSDMA, "%s: tag %p error %d",
__func__, dmat, EBUSY);
@@ -462,8 +470,9 @@ bus_dmamap_destroy(bus_dma_tag_t dmat, bus_dmamap_t map)
}
if (dmat->bounce_zone)
dmat->bounce_zone->map_count--;
- free(map, M_DEVBUF);
}
+ free(map->segments, M_DEVBUF);
+ free(map, M_DEVBUF);
dmat->map_count--;
CTR2(KTR_BUSDMA, "%s: tag %p error 0", __func__, dmat);
return (0);
@@ -486,19 +495,8 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
else
mflags = M_WAITOK;
- /* If we succeed, no mapping/bouncing will be required */
- *mapp = NULL;
-
- if (dmat->segments == NULL) {
- dmat->segments = (bus_dma_segment_t *)malloc(
- sizeof(bus_dma_segment_t) * dmat->nsegments, M_DEVBUF,
- mflags);
- if (dmat->segments == NULL) {
- CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
- __func__, dmat, dmat->flags, ENOMEM);
- return (ENOMEM);
- }
- }
+ bus_dmamap_create(dmat, flags, mapp);
+
if (flags & BUS_DMA_ZERO)
mflags |= M_ZERO;
@@ -535,7 +533,7 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
#ifdef NOTYET
if (flags & BUS_DMA_NOCACHE)
pmap_change_attr((vm_offset_t)*vaddr, dmat->maxsize,
- PAT_UNCACHEABLE);
+ VM_MEMATTR_UNCACHEABLE);
#endif
CTR4(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d",
__func__, dmat, dmat->flags, 0);
@@ -549,14 +547,10 @@ bus_dmamem_alloc(bus_dma_tag_t dmat, void** vaddr, int flags,
void
bus_dmamem_free(bus_dma_tag_t dmat, void *vaddr, bus_dmamap_t map)
{
- /*
- * dmamem does not need to be bounced, so the map should be
- * NULL
- */
- if (map != NULL)
- panic("bus_dmamem_free: Invalid map freed\n");
+ bus_dmamap_destroy(dmat, map);
+
#ifdef NOTYET
- pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, PAT_WRITE_BACK);
+ pmap_change_attr((vm_offset_t)vaddr, dmat->maxsize, VM_MEMATTR_DEFAULT);
#endif
if ((dmat->maxsize <= PAGE_SIZE) &&
(dmat->alignment < dmat->maxsize) &&
@@ -591,18 +585,13 @@ _bus_dmamap_load_buffer(bus_dma_tag_t dmat,
bus_addr_t paddr;
int seg;
- if (map == NULL)
- map = &nobounce_dmamap;
-
- if ((map != &nobounce_dmamap && map->pagesneeded == 0)
- && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
+ if (map->pagesneeded == 0 && ((dmat->flags & BUS_DMA_COULD_BOUNCE) != 0)) {
vm_offset_t vendaddr;
CTR4(KTR_BUSDMA, "lowaddr= %d Maxmem= %d, boundary= %d, "
"alignment= %d", dmat->lowaddr, ptoa((vm_paddr_t)Maxmem),
dmat->boundary, dmat->alignment);
- CTR3(KTR_BUSDMA, "map= %p, nobouncemap= %p, pagesneeded= %d",
- map, &nobounce_dmamap, map->pagesneeded);
+ CTR2(KTR_BUSDMA, "map= %p, pagesneeded= %d", map, map->pagesneeded);
/*
* Count the number of bounce pages
* needed in order to complete this transfer
@@ -731,29 +720,36 @@ bus_dmamap_load(bus_dma_tag_t dmat, bus_dmamap_t map, void *buf,
bus_size_t buflen, bus_dmamap_callback_t *callback,
void *callback_arg, int flags)
{
- bus_addr_t lastaddr = 0;
- int error, nsegs = 0;
+ bus_addr_t lastaddr = 0;
+ int error;
- if (map != NULL) {
+ if (dmat->flags & BUS_DMA_COULD_BOUNCE) {
flags |= BUS_DMA_WAITOK;
map->callback = callback;
map->callback_arg = callback_arg;
}
+ map->nsegs = 0;
error = _bus_dmamap_load_buffer(dmat, map, buf, buflen, NULL, flags,
- &lastaddr, dmat->segments, &nsegs, 1);
+ &lastaddr, map->segments, &map->nsegs, 1);
+ map->nsegs++;
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, nsegs + 1);
+ __func__, dmat, dmat->flags, error, map->nsegs);
if (error == EINPROGRESS) {
return (error);
}
+ if (dmat->iommu != NULL)
+ IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
+ dmat->highaddr, dmat->alignment, dmat->boundary,
+ dmat->iommu_cookie);
+
if (error)
- (*callback)(callback_arg, dmat->segments, 0, error);
+ (*callback)(callback_arg, map->segments, 0, error);
else
- (*callback)(callback_arg, dmat->segments, nsegs + 1, 0);
+ (*callback)(callback_arg, map->segments, map->nsegs, 0);
/*
* Return ENOMEM to the caller so that it can pass it up the stack.
@@ -775,12 +771,12 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
bus_dmamap_callback2_t *callback, void *callback_arg,
int flags)
{
- int nsegs, error;
+ int error;
M_ASSERTPKTHDR(m0);
flags |= BUS_DMA_NOWAIT;
- nsegs = 0;
+ map->nsegs = 0;
error = 0;
if (m0->m_pkthdr.len <= dmat->maxsize) {
int first = 1;
@@ -792,7 +788,7 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
error = _bus_dmamap_load_buffer(dmat, map,
m->m_data, m->m_len,
NULL, flags, &lastaddr,
- dmat->segments, &nsegs, first);
+ map->segments, &map->nsegs, first);
first = 0;
}
}
@@ -800,15 +796,21 @@ bus_dmamap_load_mbuf(bus_dma_tag_t dmat, bus_dmamap_t map,
error = EINVAL;
}
+ map->nsegs++;
+ if (dmat->iommu != NULL)
+ IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
+ dmat->highaddr, dmat->alignment, dmat->boundary,
+ dmat->iommu_cookie);
+
if (error) {
/* force "no valid mappings" in callback */
- (*callback)(callback_arg, dmat->segments, 0, 0, error);
+ (*callback)(callback_arg, map->segments, 0, 0, error);
} else {
- (*callback)(callback_arg, dmat->segments,
- nsegs+1, m0->m_pkthdr.len, error);
+ (*callback)(callback_arg, map->segments,
+ map->nsegs, m0->m_pkthdr.len, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, nsegs + 1);
+ __func__, dmat, dmat->flags, error, map->nsegs);
return (error);
}
@@ -844,6 +846,15 @@ bus_dmamap_load_mbuf_sg(bus_dma_tag_t dmat, bus_dmamap_t map,
/* XXX FIXME: Having to increment nsegs is really annoying */
++*nsegs;
+
+ if (dmat->iommu != NULL)
+ IOMMU_MAP(dmat->iommu, segs, nsegs, dmat->lowaddr,
+ dmat->highaddr, dmat->alignment, dmat->boundary,
+ dmat->iommu_cookie);
+
+ map->nsegs = *nsegs;
+ memcpy(map->segments, segs, map->nsegs*sizeof(segs[0]));
+
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
__func__, dmat, dmat->flags, error, *nsegs);
return (error);
@@ -859,7 +870,7 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
int flags)
{
bus_addr_t lastaddr = 0;
- int nsegs, error, first, i;
+ int error, first, i;
bus_size_t resid;
struct iovec *iov;
pmap_t pmap;
@@ -875,7 +886,7 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
} else
pmap = NULL;
- nsegs = 0;
+ map->nsegs = 0;
error = 0;
first = 1;
for (i = 0; i < uio->uio_iovcnt && resid != 0 && !error; i++) {
@@ -890,22 +901,28 @@ bus_dmamap_load_uio(bus_dma_tag_t dmat, bus_dmamap_t map,
if (minlen > 0) {
error = _bus_dmamap_load_buffer(dmat, map,
addr, minlen, pmap, flags, &lastaddr,
- dmat->segments, &nsegs, first);
+ map->segments, &map->nsegs, first);
first = 0;
resid -= minlen;
}
}
+ map->nsegs++;
+ if (dmat->iommu != NULL)
+ IOMMU_MAP(dmat->iommu, map->segments, &map->nsegs, dmat->lowaddr,
+ dmat->highaddr, dmat->alignment, dmat->boundary,
+ dmat->iommu_cookie);
+
if (error) {
/* force "no valid mappings" in callback */
- (*callback)(callback_arg, dmat->segments, 0, 0, error);
+ (*callback)(callback_arg, map->segments, 0, 0, error);
} else {
- (*callback)(callback_arg, dmat->segments,
- nsegs+1, uio->uio_resid, error);
+ (*callback)(callback_arg, map->segments,
+ map->nsegs, uio->uio_resid, error);
}
CTR5(KTR_BUSDMA, "%s: tag %p tag flags 0x%x error %d nsegs %d",
- __func__, dmat, dmat->flags, error, nsegs + 1);
+ __func__, dmat, dmat->flags, error, map->nsegs);
return (error);
}
@@ -917,6 +934,11 @@ _bus_dmamap_unload(bus_dma_tag_t dmat, bus_dmamap_t map)
{
struct bounce_page *bpage;
+ if (dmat->iommu) {
+ IOMMU_UNMAP(dmat->iommu, map->segments, map->nsegs, dmat->iommu_cookie);
+ map->nsegs = 0;
+ }
+
while ((bpage = STAILQ_FIRST(&map->bpages)) != NULL) {
STAILQ_REMOVE_HEAD(&map->bpages, links);
free_bounce_page(dmat, bpage);
@@ -1122,8 +1144,6 @@ add_bounce_page(bus_dma_tag_t dmat, bus_dmamap_t map, vm_offset_t vaddr,
struct bounce_page *bpage;
KASSERT(dmat->bounce_zone != NULL, ("no bounce zone in dma tag"));
- KASSERT(map != NULL && map != &nobounce_dmamap,
- ("add_bounce_page: bad map %p", map));
bz = dmat->bounce_zone;
if (map->pagesneeded == 0)
@@ -1210,3 +1230,13 @@ busdma_swi(void)
}
mtx_unlock(&bounce_lock);
}
+
+int
+bus_dma_tag_set_iommu(bus_dma_tag_t tag, struct device *iommu, void *cookie)
+{
+ tag->iommu = iommu;
+ tag->iommu_cookie = cookie;
+
+ return (0);
+}
+
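
With the segment array and count now kept per map rather than per tag, and with IOMMU_MAP()/IOMMU_UNMAP() invoked inside the load and unload paths, a leaf driver's busdma usage is unchanged: if its tag carries an IOMMU, the addresses handed to its callback are already translated. A sketch of that consumer-side sequence follows; the exdev driver, its softc fields, and EXDEV_RING_SIZE are hypothetical.

#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>

#define	EXDEV_RING_SIZE	PAGE_SIZE	/* illustrative */

/* Hypothetical driver state; only the fields used below are shown. */
struct exdev_softc {
	bus_dma_tag_t	 sc_dmat;	/* tag set up by the parent bus */
	bus_dmamap_t	 sc_ring_map;
	void		*sc_ring;	/* EXDEV_RING_SIZE bytes of ring memory */
	bus_addr_t	 sc_ring_busaddr;
};

/* Callback: with an IOMMU on the tag, ds_addr is already a bus address. */
static void
exdev_dma_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	struct exdev_softc *sc = arg;

	if (error != 0)
		return;
	sc->sc_ring_busaddr = segs[0].ds_addr;
}

static int
exdev_setup_ring(struct exdev_softc *sc)
{
	int error;

	error = bus_dmamap_create(sc->sc_dmat, 0, &sc->sc_ring_map);
	if (error != 0)
		return (error);
	return (bus_dmamap_load(sc->sc_dmat, sc->sc_ring_map, sc->sc_ring,
	    EXDEV_RING_SIZE, exdev_dma_cb, sc, BUS_DMA_NOWAIT));
}

Note also that bus_dmamem_alloc() now always returns a real map (the nobounce_dmamap placeholder is gone), and bus_dmamem_free() releases it through bus_dmamap_destroy().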
diff --git a/sys/powerpc/powerpc/iommu_if.m b/sys/powerpc/powerpc/iommu_if.m
new file mode 100644
index 000000000000..dec70e3df0f4
--- /dev/null
+++ b/sys/powerpc/powerpc/iommu_if.m
@@ -0,0 +1,54 @@
+#-
+# Copyright (c) 2010 Nathan Whitehorn
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+# 1. Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright
+# notice, this list of conditions and the following disclaimer in the
+# documentation and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
+# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+# SUCH DAMAGE.
+#
+# from: src/sys/kern/bus_if.m,v 1.21 2002/04/21 11:16:10 markm Exp
+# $FreeBSD$
+#
+
+#include <machine/bus.h>
+
+#include <sys/bus.h>
+#include <sys/bus_dma.h>
+
+INTERFACE iommu;
+
+METHOD int map {
+ device_t iommu;
+ bus_dma_segment_t *segs;
+ int *nsegs;
+ bus_addr_t lowaddr;
+ bus_addr_t highaddr;
+ bus_size_t alignment;
+ bus_size_t boundary;
+ void *cookie;
+};
+
+METHOD int unmap {
+ device_t iommu;
+ bus_dma_segment_t *segs;
+ int nsegs;
+ void *cookie;
+};
+
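
On the provider side, an IOMMU driver implements the two methods above through the generated iommu_if.h header and wires them into its device_method_t table, as with any other newbus interface. A rough sketch follows; the driver name hviommu and the helpers hviommu_enter()/hviommu_remove() are placeholders for whatever mechanism (for example, hypervisor calls) actually programs the translation.

#include <sys/param.h>
#include <sys/bus.h>

#include <machine/bus.h>

#include "iommu_if.h"

/* Hypothetical back-ends that program and clear the real translation. */
static bus_addr_t hviommu_enter(void *cookie, bus_addr_t pa, bus_size_t len);
static void hviommu_remove(void *cookie, bus_addr_t ba, bus_size_t len);

static int
hviommu_map(device_t dev, bus_dma_segment_t *segs, int *nsegs,
    bus_addr_t lowaddr, bus_addr_t highaddr, bus_size_t alignment,
    bus_size_t boundary, void *cookie)
{
	int i;

	/* Rewrite each segment's address to its IOMMU-translated form. */
	for (i = 0; i < *nsegs; i++)
		segs[i].ds_addr = hviommu_enter(cookie, segs[i].ds_addr,
		    segs[i].ds_len);
	return (0);
}

static int
hviommu_unmap(device_t dev, bus_dma_segment_t *segs, int nsegs, void *cookie)
{
	int i;

	for (i = 0; i < nsegs; i++)
		hviommu_remove(cookie, segs[i].ds_addr, segs[i].ds_len);
	return (0);
}

static device_method_t hviommu_methods[] = {
	/* device_if/bus_if methods omitted */
	DEVMETHOD(iommu_map,	hviommu_map),
	DEVMETHOD(iommu_unmap,	hviommu_unmap),
	{ 0, 0 }
};

The busdma code above then reaches these methods through IOMMU_MAP() and IOMMU_UNMAP() whenever a map is loaded or unloaded on a tag that was given this device via bus_dma_tag_set_iommu().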